[openchrome-devel] drm-openchrome: Branch 'drm-next-5.6' - 94 commits - drivers/gpu/drm drivers/gpu/host1x include/drm/gpu_scheduler.h

Kevin Brace kevinbrace at kemper.freedesktop.org
Fri Feb 7 19:18:16 UTC 2020


 drivers/gpu/drm/amd/amdgpu/Makefile                              |    1 
 drivers/gpu/drm/amd/amdgpu/amdgpu.h                              |    4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c                          |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c                 |    1 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c                          |  229 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h                          |    7 
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                       |   14 
 drivers/gpu/drm/amd/amdgpu/amdgpu_df.h                           |    3 
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c                          |   96 
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h                          |    3 
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h                          |    5 
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h                          |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c                          |   20 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c                          |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h                          |   26 
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c                         |   92 
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h                         |    2 
 drivers/gpu/drm/amd/amdgpu/athub_v1_0.c                          |    4 
 drivers/gpu/drm/amd/amdgpu/athub_v2_0.c                          |    4 
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c                             |   59 
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c                           |   11 
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c                            |    5 
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c                            |  220 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c                            |  978 ++++++++++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h                            |   35 
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c                           |   39 
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c                            |    3 
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c                            |   15 
 drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c                           |    2 
 drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c                           |    2 
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c                          |    4 
 drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c                          |    4 
 drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c                          |  705 +++++++
 drivers/gpu/drm/amd/amdgpu/navi10_ih.c                           |    2 
 drivers/gpu/drm/amd/amdgpu/nv.c                                  |    8 
 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c                           |   37 
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c                           |    4 
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c                           |    4 
 drivers/gpu/drm/amd/amdgpu/si_dma.c                              |    2 
 drivers/gpu/drm/amd/amdgpu/soc15.c                               |   32 
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c                            |    2 
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c                            |    2 
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c                            |    2 
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c                            |    2 
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c                            |    2 
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c                            |    4 
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h                            |    1 
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c                            |  105 -
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c                           |    2 
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c            |   10 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c                |   82 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c           |   19 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c        |   13 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c      |    2 
 drivers/gpu/drm/amd/display/dc/bios/command_table2.c             |   78 
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c                 |   34 
 drivers/gpu/drm/amd/display/dc/core/dc.c                         |   17 
 drivers/gpu/drm/amd/display/dc/core/dc_link.c                    |   30 
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c                 |   42 
 drivers/gpu/drm/amd/display/dc/dc.h                              |    3 
 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c                  |   73 
 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h                  |    1 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c        |   30 
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c                |   12 
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c            |    2 
 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c   |   19 
 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c |   24 
 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c   |   24 
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h            |    4 
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c                      |    3 
 drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h                  |   17 
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c                |   25 
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h                |    8 
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c                  |   19 
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c          |  307 +--
 drivers/gpu/drm/amd/display/modules/color/color_gamma.h          |    4 
 drivers/gpu/drm/amd/display/modules/freesync/freesync.c          |    2 
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h                  |    2 
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c       |    8 
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c      |    4 
 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h          |    3 
 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h         |    8 
 drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h         |    6 
 drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h        |  264 ++
 drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h       |  748 +++++++
 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h |  128 +
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c                       |   18 
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c                |   23 
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c                 |    6 
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h                   |    2 
 drivers/gpu/drm/amd/powerplay/inc/smu_types.h                    |    2 
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h              |    5 
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c                       |  187 +
 drivers/gpu/drm/amd/powerplay/smu_internal.h                     |    3 
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c                        |    6 
 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c             |   12 
 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c             |   12 
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c                       |   28 
 drivers/gpu/drm/nouveau/dispnv50/core.h                          |    6 
 drivers/gpu/drm/nouveau/dispnv50/corec37d.c                      |   23 
 drivers/gpu/drm/nouveau/dispnv50/corec57d.c                      |    9 
 drivers/gpu/drm/nouveau/dispnv50/disp.c                          |   16 
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c                 |    6 
 drivers/gpu/drm/radeon/radeon_display.c                          |    9 
 drivers/gpu/drm/radeon/radeon_mode.h                             |    1 
 drivers/gpu/drm/scheduler/sched_entity.c                         |    2 
 drivers/gpu/drm/tegra/drm.c                                      |   49 
 drivers/gpu/drm/tegra/gem.c                                      |   10 
 drivers/gpu/drm/tegra/plane.c                                    |   44 
 drivers/gpu/drm/tegra/sor.c                                      |   49 
 drivers/gpu/host1x/job.c                                         |   34 
 include/drm/gpu_scheduler.h                                      |    5 
 112 files changed, 4510 insertions(+), 942 deletions(-)

New commits:
commit 16e9063b91f8f4845e7f65d1e5c0460c0e13e9b6
Merge: fab99d966fdd 9f880327160f
Author: Kevin Brace <kevinbrace at gmx.com>
Date:   Fri Feb 7 11:09:03 2020 -0800

     Merge tag 'drm-next-2020-02-07' of git://anongit.freedesktop.org/drm/drm into drm-next-5.6
    
    drm fixes for 5.6-rc1
    
    tegra:
    - merge window regression fixes
    
    nouveau:
    - couple of volta/turing modesetting fixes
    
    amdgpu:
    - EDC fixes for Arcturus
    - GDDR6 memory training fixes
    - Fix for reading gfx clockgating registers while in GFXOFF state
    - i2c freq fixes
    - Misc display fixes
    - TLB invalidation fix when using semaphores
    - VCN 2.5 instancing fixes
    - Switch raven1 gfxoff to a blacklist
    - Coreboot workaround for KV/KB
    - Root cause dongle fixes for display and revert workaround
    - Enable GPU reset for renoir and navi
    - Navi overclocking fixes
    - Fix up confusing warnings in display clock validation on raven
    
    amdkfd:
    - SDMA fix
    
    radeon:
    - Misc LUT fixes

commit 9f880327160feb695de03caa29604883b0d00087
Merge: a345cc0d393e 58fe03d6dec9
Author: Dave Airlie <airlied at redhat.com>
Date:   Fri Feb 7 12:29:35 2020 +1000

    Merge tag 'amd-drm-next-5.6-2020-02-05' of git://people.freedesktop.org/~agd5f/linux into drm-next
    
    amd-drm-next-5.6-2020-02-05:
    
    amdgpu:
    - EDC fixes for Arcturus
    - GDDR6 memory training fixes
    - Fix for reading gfx clockgating registers while in GFXOFF state
    - i2c freq fixes
    - Misc display fixes
    - TLB invalidation fix when using semaphores
    - VCN 2.5 instancing fixes
    - Switch raven1 gfxoff to a blacklist
    - Coreboot workaround for KV/KB
    - Root cause dongle fixes for display and revert workaround
    - Enable GPU reset for renoir and navi
    - Navi overclocking fixes
    - Fix up confusing warnings in display clock validation on raven
    
    amdkfd:
    - SDMA fix
    
    radeon:
    - Misc LUT fixes
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    From: Alex Deucher <alexdeucher at gmail.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20200206035458.3894-1-alexander.deucher@amd.com

commit a345cc0d393edddde8e83aadf8be5223316442aa
Merge: e139e8aed0d3 137c4ba7163a
Author: Dave Airlie <airlied at redhat.com>
Date:   Fri Feb 7 12:23:24 2020 +1000

    Merge branch 'linux-5.6' of git://github.com/skeggsb/linux into drm-next
    
    Just a couple of fixes to Volta/Turing modesetting on some systems.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    From: Ben Skeggs <skeggsb at gmail.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/CACAvsv7=eP+Ai1ouoMyYyo1xMF0pTQki=owYjJkS=NpvKQd1fg@mail.gmail.com

commit e139e8aed0d3a536b67446cb1755a61f52e8d4c9
Merge: b45f1b3b585e 98ae41adb252
Author: Dave Airlie <airlied at redhat.com>
Date:   Fri Feb 7 12:22:23 2020 +1000

    Merge tag 'drm/tegra/for-5.6-rc1-fixes' of git://anongit.freedesktop.org/tegra/linux into drm-next
    
    drm/tegra: Fixes for v5.6-rc1
    
    These are a couple of quick fixes for regressions that were found during
    the first two weeks of the merge window.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    
    From: Thierry Reding <thierry.reding at gmail.com>
    Link: https://patchwork.freedesktop.org/patch/msgid/20200206172753.2185390-1-thierry.reding@gmail.com

commit 98ae41adb252866158dd4e998551dfa85e612bed
Author: Thierry Reding <treding at nvidia.com>
Date:   Tue Feb 4 14:59:26 2020 +0100

    gpu: host1x: Set DMA direction only for DMA-mapped buffer objects
    
    The DMA direction is only used by the DMA API, so there is no use in
    setting it when a buffer object isn't mapped with the DMA API.
    
    Signed-off-by: Thierry Reding <treding at nvidia.com>
    Tested-by: Dmitry Osipenko <digetx at gmail.com>
    Reviewed-by: Dmitry Osipenko <digetx at gmail.com>

diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 0d53c08e9972..6c689b5cc32c 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -248,6 +248,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 				goto unpin;
 			}
 
+			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
 			job->unpins[job->num_unpins].dev = host->dev;
 			phys_addr = sg_dma_address(sgt->sgl);
 		}
@@ -255,7 +256,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 		job->addr_phys[job->num_unpins] = phys_addr;
 		job->gather_addr_phys[i] = phys_addr;
 
-		job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
 		job->unpins[job->num_unpins].bo = g->bo;
 		job->unpins[job->num_unpins].sgt = sgt;
 		job->num_unpins++;
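
Condensed, pin_job() now records a DMA direction for a gather only on the
path that actually maps the buffer with the DMA API; the IOVA-allocation
path records none. A sketch of the resulting shape (the host->domain test
is paraphrased from the surrounding driver code; error handling is
trimmed):

	if (host->domain) {
		/* manually pinned into the host1x IOMMU domain: no DMA API
		 * mapping took place, so no direction is recorded */
		phys_addr = iova_dma_addr(&host->iova, alloc);
	} else if (sgt) {
		if (!dma_map_sg(host->dev, sgt->sgl, sgt->nents,
				DMA_TO_DEVICE))
			goto unpin;
		/* device and direction are recorded together, so the unpin
		 * path knows this buffer needs dma_unmap_sg() */
		job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
		job->unpins[job->num_unpins].dev = host->dev;
		phys_addr = sg_dma_address(sgt->sgl);
	}
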
commit 273da5a046965ccf0ec79eb63f2d5173467e20fa
Author: Thierry Reding <treding at nvidia.com>
Date:   Tue Feb 4 14:59:25 2020 +0100

    drm/tegra: Reuse IOVA mapping where possible
    
    This partially reverts the DMA API support that was recently merged
    because it was causing performance regressions on older Tegra devices.
    Unfortunately, the cache maintenance performed by dma_map_sg() and
    dma_unmap_sg() causes performance to drop by a factor of 10.
    
    The right solution for this would be to cache mappings for buffers per
    consumer device, but that's a bit involved. Instead, we simply revert to
    the old behaviour of sharing IOVA mappings when we know that devices can
    do so (i.e. they share the same IOMMU domain).
    
    Cc: <stable at vger.kernel.org> # v5.5
    Reported-by: Dmitry Osipenko <digetx at gmail.com>
    Signed-off-by: Thierry Reding <treding at nvidia.com>
    Tested-by: Dmitry Osipenko <digetx at gmail.com>
    Reviewed-by: Dmitry Osipenko <digetx at gmail.com>

diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index bc15b430156d..c46b4d4190ac 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -60,8 +60,16 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
 	/*
 	 * If we've manually mapped the buffer object through the IOMMU, make
 	 * sure to return the IOVA address of our mapping.
+	 *
+	 * Similarly, for buffers that have been allocated by the DMA API the
+	 * physical address can be used for devices that are not attached to
+	 * an IOMMU. For these devices, callers must pass a valid pointer via
+	 * the @phys argument.
+	 *
+	 * Imported buffers were also already mapped at import time, so the
+	 * existing mapping can be reused.
 	 */
-	if (phys && obj->mm) {
+	if (phys) {
 		*phys = obj->iova;
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index cadcdd9ea427..9ccfb56e9b01 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -3,6 +3,8 @@
  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
  */
 
+#include <linux/iommu.h>
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
@@ -107,21 +109,27 @@ const struct drm_plane_funcs tegra_plane_funcs = {
 
 static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 {
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
 	unsigned int i;
 	int err;
 
 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+		dma_addr_t phys_addr, *phys;
+		struct sg_table *sgt;
 
-		if (!dc->client.group) {
-			struct sg_table *sgt;
+		if (!domain || dc->client.group)
+			phys = &phys_addr;
+		else
+			phys = NULL;
 
-			sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
-			if (IS_ERR(sgt)) {
-				err = PTR_ERR(sgt);
-				goto unpin;
-			}
+		sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
+		if (IS_ERR(sgt)) {
+			err = PTR_ERR(sgt);
+			goto unpin;
+		}
 
+		if (sgt) {
 			err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
 					 DMA_TO_DEVICE);
 			if (err == 0) {
@@ -143,7 +151,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 			state->iova[i] = sg_dma_address(sgt->sgl);
 			state->sgt[i] = sgt;
 		} else {
-			state->iova[i] = bo->iova;
+			state->iova[i] = phys_addr;
 		}
 	}
 
@@ -156,9 +164,11 @@ unpin:
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
 		struct sg_table *sgt = state->sgt[i];
 
-		dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
-		host1x_bo_unpin(dc->dev, &bo->base, sgt);
+		if (sgt)
+			dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+				     DMA_TO_DEVICE);
 
+		host1x_bo_unpin(dc->dev, &bo->base, sgt);
 		state->iova[i] = DMA_MAPPING_ERROR;
 		state->sgt[i] = NULL;
 	}
@@ -172,17 +182,13 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
 
 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+		struct sg_table *sgt = state->sgt[i];
 
-		if (!dc->client.group) {
-			struct sg_table *sgt = state->sgt[i];
-
-			if (sgt) {
-				dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
-					     DMA_TO_DEVICE);
-				host1x_bo_unpin(dc->dev, &bo->base, sgt);
-			}
-		}
+		if (sgt)
+			dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+				     DMA_TO_DEVICE);
 
+		host1x_bo_unpin(dc->dev, &bo->base, sgt);
 		state->iova[i] = DMA_MAPPING_ERROR;
 		state->sgt[i] = NULL;
 	}
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 25ca54de8fc5..0d53c08e9972 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -8,6 +8,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/host1x.h>
+#include <linux/iommu.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -101,9 +102,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 {
 	struct host1x_client *client = job->client;
 	struct device *dev = client->dev;
+	struct iommu_domain *domain;
 	unsigned int i;
 	int err;
 
+	domain = iommu_get_domain_for_dev(dev);
 	job->num_unpins = 0;
 
 	for (i = 0; i < job->num_relocs; i++) {
@@ -117,7 +120,19 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}
 
-		if (client->group)
+		/*
+		 * If the client device is not attached to an IOMMU, the
+		 * physical address of the buffer object can be used.
+		 *
+		 * Similarly, when an IOMMU domain is shared between all
+		 * host1x clients, the IOVA is already available, so no
+		 * need to map the buffer object again.
+		 *
+		 * XXX Note that this isn't always safe to do because it
+		 * relies on an assumption that no cache maintenance is
+		 * needed on the buffer objects.
+		 */
+		if (!domain || client->group)
 			phys = &phys_addr;
 		else
 			phys = NULL;
@@ -176,6 +191,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 		dma_addr_t phys_addr;
 		unsigned long shift;
 		struct iova *alloc;
+		dma_addr_t *phys;
 		unsigned int j;
 
 		g->bo = host1x_bo_get(g->bo);
@@ -184,7 +200,17 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}
 
-		sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+		/**
+		 * If the host1x is not attached to an IOMMU, there is no need
+		 * to map the buffer object for the host1x, since the physical
+		 * address can simply be used.
+		 */
+		if (!iommu_get_domain_for_dev(host->dev))
+			phys = &phys_addr;
+		else
+			phys = NULL;
+
+		sgt = host1x_bo_pin(host->dev, g->bo, phys);
 		if (IS_ERR(sgt)) {
 			err = PTR_ERR(sgt);
 			goto unpin;
@@ -214,7 +240,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 
 			job->unpins[job->num_unpins].size = gather_size;
 			phys_addr = iova_dma_addr(&host->iova, alloc);
-		} else {
+		} else if (sgt) {
 			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
 					 DMA_TO_DEVICE);
 			if (!err) {
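
The pattern repeated across these hunks: the caller decides up front whether
an existing mapping can be reused and, if so, passes a non-NULL phys pointer
into host1x_bo_pin(), which then returns NULL instead of an sg_table. A
minimal consumer-side sketch (example_pin() and shared_group are
hypothetical stand-ins for the real call sites and their client->group /
dc->client.group checks):

static int example_pin(struct device *dev, struct host1x_bo *bo,
		       bool shared_group, dma_addr_t *iova_out,
		       struct sg_table **sgt_out)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	dma_addr_t phys_addr, *phys;
	struct sg_table *sgt;

	/* no IOMMU, or a domain shared with host1x: ask for the existing
	 * physical/IOVA address by passing a non-NULL @phys */
	if (!domain || shared_group)
		phys = &phys_addr;
	else
		phys = NULL;	/* request a fresh per-device DMA mapping */

	sgt = host1x_bo_pin(dev, bo, phys);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	if (sgt) {
		/* freshly pinned: map it for this device */
		if (!dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE))
			return -ENOMEM;
		*iova_out = sg_dma_address(sgt->sgl);
	} else {
		/* reused: the address came back through @phys */
		*iova_out = phys_addr;
	}

	*sgt_out = sgt;
	return 0;
}
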
commit 2d9384ff91770a71bd1ff24c25952ef1187a0e9c
Author: Thierry Reding <treding at nvidia.com>
Date:   Tue Feb 4 14:59:24 2020 +0100

    drm/tegra: Relax IOMMU usage criteria on old Tegra
    
    Older Tegra devices only allow addressing 32 bits of memory, so whether
    or not the host1x is attached to an IOMMU doesn't matter. host1x IOMMU
    attachment is only needed on devices that can address memory beyond the
    32-bit boundary and where the host1x doesn't support the wide GATHER
    opcode that allows it to access buffers at higher addresses.
    
    Cc: <stable at vger.kernel.org> # v5.5
    Signed-off-by: Thierry Reding <treding at nvidia.com>
    Tested-by: Dmitry Osipenko <digetx at gmail.com>
    Reviewed-by: Dmitry Osipenko <digetx at gmail.com>

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index aa9e49f04988..bd268028fb3d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1037,23 +1037,9 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
 	free_pages((unsigned long)virt, get_order(size));
 }
 
-static int host1x_drm_probe(struct host1x_device *dev)
+static bool host1x_drm_wants_iommu(struct host1x_device *dev)
 {
-	struct drm_driver *driver = &tegra_drm_driver;
 	struct iommu_domain *domain;
-	struct tegra_drm *tegra;
-	struct drm_device *drm;
-	int err;
-
-	drm = drm_dev_alloc(driver, &dev->dev);
-	if (IS_ERR(drm))
-		return PTR_ERR(drm);
-
-	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
-	if (!tegra) {
-		err = -ENOMEM;
-		goto put;
-	}
 
 	/*
 	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
@@ -1082,9 +1068,38 @@ static int host1x_drm_probe(struct host1x_device *dev)
 	 * up the device tree appropriately. This is considered an problem
 	 * of integration, so care must be taken for the DT to be consistent.
 	 */
-	domain = iommu_get_domain_for_dev(drm->dev->parent);
+	domain = iommu_get_domain_for_dev(dev->dev.parent);
+
+	/*
+	 * Tegra20 and Tegra30 don't support addressing memory beyond the
+	 * 32-bit boundary, so the regular GATHER opcodes will always be
+	 * sufficient and whether or not the host1x is attached to an IOMMU
+	 * doesn't matter.
+	 */
+	if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+		return true;
+
+	return domain != NULL;
+}
+
+static int host1x_drm_probe(struct host1x_device *dev)
+{
+	struct drm_driver *driver = &tegra_drm_driver;
+	struct tegra_drm *tegra;
+	struct drm_device *drm;
+	int err;
+
+	drm = drm_dev_alloc(driver, &dev->dev);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+	if (!tegra) {
+		err = -ENOMEM;
+		goto put;
+	}
 
-	if (domain && iommu_present(&platform_bus_type)) {
+	if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
 		tegra->domain = iommu_domain_alloc(&platform_bus_type);
 		if (!tegra->domain) {
 			err = -ENOMEM;
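
The two-step return in host1x_drm_wants_iommu() collapses to a single
condition; written out (an equivalent reformulation of the hunk above, not
the code as merged):

	/* use an IOMMU when the DRM clients already sit behind one, or when
	 * the parent host1x is limited to 32-bit addressing anyway (the
	 * regular GATHER opcodes then always suffice, so attachment does
	 * not matter) */
	return domain != NULL ||
	       dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32);
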
commit 58fe03d6dec908a1bec07eea7e94907af5c07eec
Author: Lyude Paul <lyude at redhat.com>
Date:   Fri Jan 24 14:10:46 2020 -0500

    drm/amd/dm/mst: Ignore payload update failures
    
    Disabling a display on MST can potentially happen after the entire MST
    topology has been removed, which means that we can't communicate with
    the topology at all in this scenario. Likewise, this also means that we
    can't properly update payloads on the topology and as such, it's a good
    idea to ignore payload update failures when disabling displays.
    Currently, amdgpu makes the mistake of halting the payload update
    process when any payload update failure occurs, which leaves DC's
    local copies of the payload tables out of date.
    
    This ends up causing problems with hotplugging MST topologies, and
    causes modesets on the second hotplug to fail like so:
    
    [drm] Failed to updateMST allocation table forpipe idx:1
    ------------[ cut here ]------------
    WARNING: CPU: 5 PID: 1511 at
    drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2677
    update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
    Modules linked in: cdc_ether usbnet fuse xt_conntrack nf_conntrack
    nf_defrag_ipv6 libcrc32c nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4
    nft_counter nft_compat nf_tables nfnetlink tun bridge stp llc sunrpc
    vfat fat wmi_bmof uvcvideo snd_hda_codec_realtek snd_hda_codec_generic
    snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_intel videobuf2_memops
    videobuf2_v4l2 snd_intel_dspcfg videobuf2_common crct10dif_pclmul
    snd_hda_codec videodev crc32_pclmul snd_hwdep snd_hda_core
    ghash_clmulni_intel snd_seq mc joydev pcspkr snd_seq_device snd_pcm
    sp5100_tco k10temp i2c_piix4 snd_timer thinkpad_acpi ledtrig_audio snd
    wmi soundcore video i2c_scmi acpi_cpufreq ip_tables amdgpu(O)
    rtsx_pci_sdmmc amd_iommu_v2 gpu_sched mmc_core i2c_algo_bit ttm
    drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec drm
    crc32c_intel serio_raw hid_multitouch r8152 mii nvme r8169 nvme_core
    rtsx_pci pinctrl_amd
    CPU: 5 PID: 1511 Comm: gnome-shell Tainted: G           O      5.5.0-rc7Lyude-Test+ #4
    Hardware name: LENOVO FA495SIT26/FA495SIT26, BIOS R12ET22W(0.22 ) 01/31/2019
    RIP: 0010:update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
    Code: 28 00 00 00 75 2b 48 8d 65 e0 5b 41 5c 41 5d 41 5e 5d c3 0f b6 06
    49 89 1c 24 41 88 44 24 08 0f b6 46 01 41 88 44 24 09 eb 93 <0f> 0b e9
    2f ff ff ff e8 a6 82 a3 c2 66 0f 1f 44 00 00 0f 1f 44 00
    RSP: 0018:ffffac428127f5b0 EFLAGS: 00010202
    RAX: 0000000000000002 RBX: ffff8d1e166eee80 RCX: 0000000000000000
    RDX: ffffac428127f668 RSI: ffff8d1e166eee80 RDI: ffffac428127f610
    RBP: ffffac428127f640 R08: ffffffffc03d94a8 R09: 0000000000000000
    R10: ffff8d1e24b02000 R11: ffffac428127f5b0 R12: ffff8d1e1b83d000
    R13: ffff8d1e1bea0b08 R14: 0000000000000002 R15: 0000000000000002
    FS:  00007fab23ffcd80(0000) GS:ffff8d1e28b40000(0000) knlGS:0000000000000000
    CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
    CR2: 00007f151f1711e8 CR3: 00000005997c0000 CR4: 00000000003406e0
    Call Trace:
     ? mutex_lock+0xe/0x30
     dc_link_allocate_mst_payload+0x9a/0x210 [amdgpu]
     ? dm_read_reg_func+0x39/0xb0 [amdgpu]
     ? core_link_enable_stream+0x656/0x730 [amdgpu]
     core_link_enable_stream+0x656/0x730 [amdgpu]
     dce110_apply_ctx_to_hw+0x58e/0x5d0 [amdgpu]
     ? dcn10_verify_allow_pstate_change_high+0x1d/0x280 [amdgpu]
     ? dcn10_wait_for_mpcc_disconnect+0x3c/0x130 [amdgpu]
     dc_commit_state+0x292/0x770 [amdgpu]
     ? add_timer+0x101/0x1f0
     ? ttm_bo_put+0x1a1/0x2f0 [ttm]
     amdgpu_dm_atomic_commit_tail+0xb59/0x1ff0 [amdgpu]
     ? amdgpu_move_blit.constprop.0+0xb8/0x1f0 [amdgpu]
     ? amdgpu_bo_move+0x16d/0x2b0 [amdgpu]
     ? ttm_bo_handle_move_mem+0x118/0x570 [ttm]
     ? ttm_bo_validate+0x134/0x150 [ttm]
     ? dm_plane_helper_prepare_fb+0x1b9/0x2a0 [amdgpu]
     ? _cond_resched+0x15/0x30
     ? wait_for_completion_timeout+0x38/0x160
     ? _cond_resched+0x15/0x30
     ? wait_for_completion_interruptible+0x33/0x190
     commit_tail+0x94/0x130 [drm_kms_helper]
     drm_atomic_helper_commit+0x113/0x140 [drm_kms_helper]
     drm_atomic_helper_set_config+0x70/0xb0 [drm_kms_helper]
     drm_mode_setcrtc+0x194/0x6a0 [drm]
     ? _cond_resched+0x15/0x30
     ? mutex_lock+0xe/0x30
     ? drm_mode_getcrtc+0x180/0x180 [drm]
     drm_ioctl_kernel+0xaa/0xf0 [drm]
     drm_ioctl+0x208/0x390 [drm]
     ? drm_mode_getcrtc+0x180/0x180 [drm]
     amdgpu_drm_ioctl+0x49/0x80 [amdgpu]
     do_vfs_ioctl+0x458/0x6d0
     ksys_ioctl+0x5e/0x90
     __x64_sys_ioctl+0x16/0x20
     do_syscall_64+0x55/0x1b0
     entry_SYSCALL_64_after_hwframe+0x44/0xa9
    RIP: 0033:0x7fab2121f87b
    Code: 0f 1e fa 48 8b 05 0d 96 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff
    ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01
    f0 ff ff 73 01 c3 48 8b 0d dd 95 2c 00 f7 d8 64 89 01 48
    RSP: 002b:00007ffd045f9068 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
    RAX: ffffffffffffffda RBX: 00007ffd045f90a0 RCX: 00007fab2121f87b
    RDX: 00007ffd045f90a0 RSI: 00000000c06864a2 RDI: 000000000000000b
    RBP: 00007ffd045f90a0 R08: 0000000000000000 R09: 000055dbd2985d10
    R10: 000055dbd2196280 R11: 0000000000000246 R12: 00000000c06864a2
    R13: 000000000000000b R14: 0000000000000000 R15: 000055dbd2196280
    ---[ end trace 6ea888c24d2059cd ]---
    
    Note as well, I have only been able to reproduce this on setups with 2
    MST displays.
    
    Changes since v1:
    * Don't return false when part 1 or part 2 of updating the payloads
      fails, we don't want to abort at any step of the process even if
      things fail
    
    Reviewed-by: Mikita Lipski <Mikita.Lipski at amd.com>
    Signed-off-by: Lyude Paul <lyude at redhat.com>
    Acked-by: Harry Wentland <harry.wentland at amd.com>
    Cc: stable at vger.kernel.org
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 069b7a6f5597..318b474ff20e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -216,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
 	}
 
-	ret = drm_dp_update_payload_part1(mst_mgr);
+	/* It's OK for this to fail */
+	drm_dp_update_payload_part1(mst_mgr);
 
 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -225,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 
 	get_payload_table(aconnector, proposed_table);
 
-	if (ret)
-		return false;
-
 	return true;
 }
 
@@ -285,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
 	struct drm_dp_mst_port *mst_port;
-	int ret;
 
 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
@@ -299,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	if (!mst_mgr->mst_state)
 		return false;
 
-	ret = drm_dp_update_payload_part2(mst_mgr);
-
-	if (ret)
-		return false;
+	/* It's OK for this to fail */
+	drm_dp_update_payload_part2(mst_mgr);
 
 	if (!enable)
 		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
commit 7b913a76a6cdda87e538df50df496624407373bf
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Tue Feb 4 09:07:19 2020 -0500

    drm/amdgpu: update default voltage for boot od table for navi1x
    
    The boot OD table needs to be updated as well so that the proper
    values are shown when resetting to the defaults.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 2c8c4cbce548..19a9846b730e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1977,7 +1977,7 @@ static int navi10_setup_od_limits(struct smu_context *smu) {
 }
 
 static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
-	OverDriveTable_t *od_table;
+	OverDriveTable_t *od_table, *boot_od_table;
 	int ret = 0;
 
 	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
@@ -1985,6 +1985,7 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
 		return ret;
 
 	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+	boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
 	if (initialize) {
 		ret = navi10_setup_od_limits(smu);
 		if (ret) {
@@ -1998,6 +1999,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
 										od_table->GfxclkFreq1);
 				if (ret)
 					od_table->GfxclkVolt1 = 0;
+				if (boot_od_table)
+					boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1;
 			}
 
 			if (!od_table->GfxclkVolt2) {
@@ -2006,6 +2009,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
 										od_table->GfxclkFreq2);
 				if (ret)
 					od_table->GfxclkVolt2 = 0;
+				if (boot_od_table)
+					boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2;
 			}
 
 			if (!od_table->GfxclkVolt3) {
@@ -2014,6 +2019,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
 										od_table->GfxclkFreq3);
 				if (ret)
 					od_table->GfxclkVolt3 = 0;
+				if (boot_od_table)
+					boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3;
 			}
 		}
 	}
commit 1064ad4aeef94f51ca230ac639a9e996fb7867a0
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Wed Jan 29 12:42:57 2020 -0500

    drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_voltage
    
    Cull out 0 clocks to avoid a warning in DC.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/963
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 273126cfc37d..689072a312a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1080,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 
 	clocks->num_levels = 0;
 	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
-		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
-		clocks->num_levels++;
+		if (pclk_vol_table->entries[i].clk) {
+			clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
+			clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
+			clocks->num_levels++;
+		}
 	}
 
 	return 0;
commit 4d0a72b66065dd7e274bad6aa450196d42fd8f84
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Tue Jan 28 13:19:51 2020 -0500

    drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_latency
    
    Only send non-0 clocks to DC for validation.  This mirrors
    what the Windows driver does.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/963
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 4e8ab139bb3b..273126cfc37d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 
 	clocks->num_levels = 0;
 	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
-		clocks->data[i].latency_in_us = latency_required ?
-						smu10_get_mem_latency(hwmgr,
-						pclk_vol_table->entries[i].clk) :
-						0;
-		clocks->num_levels++;
+		if (pclk_vol_table->entries[i].clk) {
+			clocks->data[clocks->num_levels].clocks_in_khz =
+				pclk_vol_table->entries[i].clk * 10;
+			clocks->data[clocks->num_levels].latency_in_us = latency_required ?
+				smu10_get_mem_latency(hwmgr,
+						      pclk_vol_table->entries[i].clk) :
+				0;
+			clocks->num_levels++;
+		}
 	}
 
 	return 0;
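
Both smu10 fixes use the same filter-and-compact idiom, and the subtle part
is the destination index: accepted entries are written at
clocks->data[clocks->num_levels] rather than clocks->data[i], so skipped
zero clocks don't leave holes in the output array. In isolation (a sketch of
the voltage variant; the latency variant differs only in the second field):

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		if (!pclk_vol_table->entries[i].clk)
			continue;	/* cull 0 clocks before DC sees them */

		/* the write index advances only for accepted entries */
		clocks->data[clocks->num_levels].clocks_in_khz =
			pclk_vol_table->entries[i].clk * 10;
		clocks->data[clocks->num_levels].voltage_in_mv =
			pclk_vol_table->entries[i].vol;
		clocks->num_levels++;
	}
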
commit c37243579d6c881c575dcfb54cf31c9ded88f946
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Tue Jan 28 14:39:45 2020 -0500

    drm/amdgpu/display: handle multiple numbers of fclks in dcn_calcs.c (v2)
    
    We might get different numbers of clocks from powerplay depending
    on what the OEM has populated.
    
    v2: add assert for at least one level
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/963
    Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index a27d84ca15a5..1a37550731de 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1435,6 +1435,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 	struct dc_context *ctx = dc->ctx;
 	struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
 	bool res;
+	unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
 
 	/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
 	res = dm_pp_get_clock_levels_by_type_with_voltage(
@@ -1446,17 +1447,28 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 		res = verify_clock_values(&fclks);
 
 	if (res) {
-		ASSERT(fclks.num_levels >= 3);
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
-				(fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
-				* ddr4_dram_factor_single_Channel / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
-				(fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
-				* ddr4_dram_factor_single_Channel / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
-				(fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
-				* ddr4_dram_factor_single_Channel / 1000.0;
+		ASSERT(fclks.num_levels);
+
+		vmin0p65_idx = 0;
+		vmid0p72_idx = fclks.num_levels -
+			(fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
+		vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
+		vmax0p9_idx = fclks.num_levels - 1;
+
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+			32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+			dc->dcn_soc->number_of_channels *
+			(fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
+			* ddr4_dram_factor_single_Channel / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+			dc->dcn_soc->number_of_channels *
+			(fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
+			* ddr4_dram_factor_single_Channel / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+			dc->dcn_soc->number_of_channels *
+			(fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
+			* ddr4_dram_factor_single_Channel / 1000.0;
 	} else
 		BREAK_TO_DEBUGGER();
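
Working the new index expressions through for level counts up to four shows
how the four voltage points collapse when powerplay reports fewer clocks
(derived directly from the assignments above):

	/*
	 * num_levels | vmin0p65 vmid0p72 vnom0p8 vmax0p9
	 * -----------+---------------------------------
	 *      4     |    0        1        2       3
	 *      3     |    0        0        1       2
	 *      2     |    0        0        0       1
	 *      1     |    0        0        0       0
	 */
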
 
commit 0531aa6eb38bfa9514609e2727558a051da7365f
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Sat Jan 25 13:30:45 2020 -0500

    drm/amdgpu: fetch default VDDC curve voltages (v2)
    
    Ask the SMU for the default VDDC curve voltage values.  This
    properly reports the VDDC values in the OD interface.
    
    v2: only update if the original values are 0
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 5.5.x

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 272e58510d0d..2c8c4cbce548 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -121,6 +121,8 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3),
 	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
 	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange),
+	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm),
+	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive),
 };
 
 static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
@@ -1934,6 +1936,28 @@ static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_tab
 	return 0;
 }
 
+static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+						     uint16_t *voltage,
+						     uint32_t freq)
+{
+	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
+	uint32_t value = 0;
+	int ret;
+
+	ret = smu_send_smc_msg_with_param(smu,
+					  SMU_MSG_GetVoltageByDpm,
+					  param);
+	if (ret) {
+		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+		return ret;
+	}
+
+	smu_read_smc_arg(smu, &value);
+	*voltage = (uint16_t)value;
+
+	return 0;
+}
+
 static int navi10_setup_od_limits(struct smu_context *smu) {
 	struct smu_11_0_overdrive_table *overdrive_table = NULL;
 	struct smu_11_0_powerplay_table *powerplay_table = NULL;
@@ -1960,16 +1984,40 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
 	if (ret)
 		return ret;
 
+	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
 	if (initialize) {
 		ret = navi10_setup_od_limits(smu);
 		if (ret) {
 			pr_err("Failed to retrieve board OD limits\n");
 			return ret;
 		}
+		if (od_table) {
+			if (!od_table->GfxclkVolt1) {
+				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+										&od_table->GfxclkVolt1,
+										od_table->GfxclkFreq1);
+				if (ret)
+					od_table->GfxclkVolt1 = 0;
+			}
+
+			if (!od_table->GfxclkVolt2) {
+				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+										&od_table->GfxclkVolt2,
+										od_table->GfxclkFreq2);
+				if (ret)
+					od_table->GfxclkVolt2 = 0;
+			}
 
+			if (!od_table->GfxclkVolt3) {
+				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+										&od_table->GfxclkVolt3,
+										od_table->GfxclkFreq3);
+				if (ret)
+					od_table->GfxclkVolt3 = 0;
+			}
+		}
 	}
 
-	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
 	if (od_table) {
 		navi10_dump_od_table(od_table);
 	}
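
For reference, the argument word built by
navi10_overdrive_get_gfx_clk_base_voltage() packs the clock selector into
the upper half and the frequency into the lower half. A sketch of a single
curve-point query (assuming, as the OD table usage suggests, that
GfxclkFreq1 is a 16-bit-safe MHz value):

	uint32_t value = 0;
	uint16_t voltage = 0;
	/* lower 16 bits: target frequency; upper 16 bits: clock selector */
	uint32_t param = (od_table->GfxclkFreq1 & 0xFFFF) |
			 (PPCLK_GFXCLK << 16);
	int ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetVoltageByDpm, param);
	if (!ret) {
		/* the SMU writes its reply into the argument register */
		smu_read_smc_arg(smu, &value);
		voltage = (uint16_t)value;
	}
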
commit 93c5f1f66c6ad4a3b180c1644f74e1b3b4be7864
Author: Matt Coffin <mcoffin13 at gmail.com>
Date:   Sat Jan 25 13:04:05 2020 -0500

    drm/amdgpu/smu_v11_0: Correct behavior of restoring default tables (v2)
    
    Previously, the sysfs functionality for restoring the default powerplay
    table was sourcing its information from the currently-staged powerplay
    table.
    
    This patch adds a step to cache the first overdrive table that we see on
    boot, so that it can be used later to "restore" the powerplay table.
    
    v2: squash my original with Matt's fix
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
    Signed-off-by: Matt Coffin <mcoffin13 at gmail.com>
    Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 5.5.x

diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 3237eb1ff708..97b6714e83e6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -273,6 +273,7 @@ struct smu_table_context
 	uint8_t                         thermal_controller_type;
 
 	void				*overdrive_table;
+	void                            *boot_overdrive_table;
 };
 
 struct smu_dpm_context {
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index a0a6dc2c190a..272e58510d0d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -2065,6 +2065,13 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
 			return ret;
 		od_table->UclkFmax = input[1];
 		break;
+	case PP_OD_RESTORE_DEFAULT_TABLE:
+		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+			pr_err("Overdrive table was not initialized!\n");
+			return -EINVAL;
+		}
+		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
+		break;
 	case PP_OD_COMMIT_DPM_TABLE:
 		navi10_dump_od_table(od_table);
 		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 02f8c9cb89d9..0dc49479a7eb 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1882,6 +1882,12 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
 			pr_err("Failed to export overdrive table!\n");
 			return ret;
 		}
+		if (!table_context->boot_overdrive_table) {
+			table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
+			if (!table_context->boot_overdrive_table) {
+				return -ENOMEM;
+			}
+		}
 	}
 	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
 	if (ret) {
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 38febd5ca4da..4ad8d6c14ee5 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1706,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
 	struct smu_table_context *table_context = &smu->smu_table;
 	int ret;
 
-	if (initialize) {
-		if (table_context->overdrive_table)
-			return -EINVAL;
-
-		table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
-
-		if (!table_context->overdrive_table)
-			return -ENOMEM;
-
-		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
-				       table_context->overdrive_table, false);
-		if (ret) {
-			pr_err("Failed to export over drive table!\n");
-			return ret;
-		}
+	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+	if (ret)
+		return ret;
 
+	if (initialize) {
 		ret = vega20_set_default_od8_setttings(smu);
 		if (ret)
 			return ret;
@@ -2778,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
 		break;
 
 	case PP_OD_RESTORE_DEFAULT_TABLE:
-		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
-		if (ret) {
-			pr_err("Failed to export over drive table!\n");
-			return ret;
+		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+			pr_err("Overdrive table was not initialized!\n");
+			return -EINVAL;
 		}
-
+		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
 		break;
 
 	case PP_OD_COMMIT_DPM_TABLE:
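
Reduced to its essentials, the cache-once/restore-later pattern these hunks
introduce looks like this (a sketch; overdrive_table_size and the
surrounding smu_update_table() plumbing are per-ASIC):

	/* at first initialization: snapshot the overdrive table exactly as
	 * the firmware booted with it, before any user edits are staged */
	if (!table_context->boot_overdrive_table) {
		table_context->boot_overdrive_table =
			kmemdup(table_context->overdrive_table,
				overdrive_table_size, GFP_KERNEL);
		if (!table_context->boot_overdrive_table)
			return -ENOMEM;
	}

	/* on PP_OD_RESTORE_DEFAULT_TABLE: restore from the boot snapshot
	 * rather than from the currently-staged (possibly edited) table */
	memcpy(table_context->overdrive_table,
	       table_context->boot_overdrive_table,
	       sizeof(OverDriveTable_t));
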
commit ee23a518fdc2c1dd1aaaf3a2c7ffdd6c83b396ec
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Sat Jan 25 11:27:06 2020 -0500

    drm/amdgpu/navi10: add OD_RANGE for navi overclocking
    
    So users can see the range of valid values.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 5.5.x

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 7c3629f64419..a0a6dc2c190a 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -739,6 +739,15 @@ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_tabl
 	return od_table->cap[feature];
 }
 
+static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
+					enum SMU_11_0_ODSETTING_ID setting,
+					uint32_t *min, uint32_t *max)
+{
+	if (min)
+		*min = od_table->min[setting];
+	if (max)
+		*max = od_table->max[setting];
+}
 
 static int navi10_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
@@ -757,6 +766,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	OverDriveTable_t *od_table =
 		(OverDriveTable_t *)table_context->overdrive_table;
 	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+	uint32_t min_value, max_value;
 
 	switch (clk_type) {
 	case SMU_GFXCLK:
@@ -869,6 +879,55 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 			}
 			size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
 		}
+		break;
+	case SMU_OD_RANGE:
+		if (!smu->od_enabled || !od_table || !od_settings)
+			break;
+		size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+						    &min_value, NULL);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+						    NULL, &max_value);
+			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+					min_value, max_value);
+		}
+
+		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+					min_value, max_value);
+		}
+
+		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+					min_value, max_value);
+		}
+
 		break;
 	default:
 		break;
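
With this in place, pp_od_clk_voltage gains an OD_RANGE section shaped by
the format strings above; on a hypothetical board it would read roughly as
follows (all numbers here are illustrative placeholders, not values from
real hardware):

OD_RANGE:
SCLK:     800Mhz    2100Mhz
MCLK:     625Mhz     950Mhz
VDDC_CURVE_SCLK[0]:     800Mhz    2100Mhz
VDDC_CURVE_VOLT[0]:     750mV      1200mV
VDDC_CURVE_SCLK[1]:     800Mhz    2100Mhz
VDDC_CURVE_VOLT[1]:     750mV      1200mV
VDDC_CURVE_SCLK[2]:     800Mhz    2100Mhz
VDDC_CURVE_VOLT[2]:     750mV      1200mV
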
commit 45826e9c4e9e952db43053f4fbed58ec602a410f
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Sat Jan 25 11:51:41 2020 -0500

    drm/amdgpu/navi: fix index for OD MCLK
    
    You can only adjust the max mclk, not the min.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
    Reviewed-by: Evan Quan <evan.quan at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org # 5.5.x

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index e59cd2c74dc9..7c3629f64419 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -845,7 +845,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
 			break;
 		size += sprintf(buf + size, "OD_MCLK:\n");
-		size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
 		break;
 	case SMU_OD_VDDC_CURVE:
 		if (!smu->od_enabled || !od_table || !od_settings)
commit a30a8c2f884c2835383cc5cce134fafbcaa62c1f
Author: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
Date:   Fri Dec 6 13:16:08 2019 -0500

    drm/amd/display: Fix HW/SW state mismatch
    
    [Why]
    When we disable a connector we don't explicitly remove it from the module,
    so the display is still cached (SW) in the hdcp_module.

    SST: no issues because we can only have 1 display per link

    MST: We have x displays per link; if we disable one, we don't remove it from
    the module, so the module still has x displays cached (SW).

    If we try to enable HDCP, psp verification will fail because we are reporting
    x displays while the HW only has x-1 displays enabled.
    
    [How]
    Check the callback for when we disable stream and call remove display.
    
    Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index ae329335dfcc..0acd3409dd6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -135,6 +135,20 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 	mutex_unlock(&hdcp_w->mutex);
 }
 
+static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+			 unsigned int link_index,
+			 struct amdgpu_dm_connector *aconnector)
+{
+	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+	mutex_lock(&hdcp_w->mutex);
+	hdcp_w->aconnector = aconnector;
+
+	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
+
+	process_output(hdcp_w);
+	mutex_unlock(&hdcp_w->mutex);
+}
 void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
 {
 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -303,6 +317,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	memset(link, 0, sizeof(*link));
 
 	display->index = aconnector->base.index;
+
+	if (config->dpms_off) {
+		hdcp_remove_display(hdcp_work, link_index, aconnector);
+		return;
+	}
 	display->state = MOD_HDCP_DISPLAY_ACTIVE;
 
 	if (aconnector->dc_sink != NULL)
commit 47eed65178e8fa626097648eb90cb6b31952efbc
Author: Mikita Lipski <mikita.lipski at amd.com>
Date:   Fri Jan 31 09:51:23 2020 -0500

    drm/amd/display: Fix a typo when computing dsc configuration
    
    [why]
    Remove a backslash symbol accidentally left in the increase_dsc_bpp()
    function when computing the MST DSC configuration.
    
    Signed-off-by: Mikita Lipski <mikita.lipski at amd.com>
    Reviewed-by: Zhan Liu <zhan.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 96b391e4b3e7..5672f7765919 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -632,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
 			if (drm_dp_atomic_find_vcpi_slots(state,
 							  params[next_index].port->mgr,
 							  params[next_index].port,
-							  vars[next_index].pbn,\
+							  vars[next_index].pbn,
 							  dm_mst_get_pbn_divider(dc_link)) < 0)
 				return;
 			if (!drm_dp_mst_atomic_check(state)) {
commit 1cf8c930b378016846c88ef0f1444248033326ec
Author: Evan Quan <evan.quan at amd.com>
Date:   Thu Jan 30 16:46:38 2020 +0800

    drm/amd/powerplay: fix navi10 system intermittent reboot issue V2
    
    This workaround is needed only for Navi10 12 Gbps SKUs.
    
    V2: added SMU firmware version guard
    
    Signed-off-by: Evan Quan <evan.quan at amd.com>
    Reviewed-by: Feifei Xu <Feifei.Xu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
    Cc: stable at vger.kernel.org

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 99469479e277..99ad4ddbe12f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/pci.h>
 
 #include "pp_debug.h"
 #include "amdgpu.h"
@@ -1137,6 +1138,23 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 		ret = smu_system_features_control(smu, true);
 		if (ret)
 			return ret;
+
+		if (adev->asic_type == CHIP_NAVI10) {
+			if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
+							      adev->pdev->revision == 0xc3 ||
+							      adev->pdev->revision == 0xca ||
+							      adev->pdev->revision == 0xcb)) ||
+			    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
+							      adev->pdev->revision == 0xf4 ||
+							      adev->pdev->revision == 0xf5 ||
+							      adev->pdev->revision == 0xf6))) {
+				ret = smu_disable_umc_cdr_12gbps_workaround(smu);
+				if (ret) {
+					pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
+					return ret;
+				}
+			}
+		}
 	}
 	if (adev->asic_type != CHIP_ARCTURUS) {
 		ret = smu_notify_display_change(smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index b0591a8dda41..3237eb1ff708 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -565,6 +565,7 @@ struct pptable_funcs {
 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
 	int (*override_pcie_parameters)(struct smu_context *smu);
 	uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
+	int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
 };
 
 int smu_load_microcode(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index d8c9b7f91fcc..a5b4df146713 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -170,6 +170,8 @@
 	__SMU_DUMMY_MAP(SetSoftMinJpeg),              \
 	__SMU_DUMMY_MAP(SetHardMinFclkByFreq),        \
 	__SMU_DUMMY_MAP(DFCstateControl), \
+	__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
+	__SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
index 373861ddccd0..406bfd187ce8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -120,7 +120,10 @@
 #define PPSMC_MSG_GetVoltageByDpmOverdrive       0x45
 #define PPSMC_MSG_BacoAudioD3PME                 0x48
 
-#define PPSMC_Message_Count                      0x49
+#define PPSMC_MSG_DALDisableDummyPstateChange    0x49
+#define PPSMC_MSG_DALEnableDummyPstateChange     0x4A
+
+#define PPSMC_Message_Count                      0x4B
 
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index f1b27fc20c19..e59cd2c74dc9 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -119,6 +119,8 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(PowerDownJpeg,		PPSMC_MSG_PowerDownJpeg),
 	MSG_MAP(BacoAudioD3PME,		PPSMC_MSG_BacoAudioD3PME),
 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3),
+	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
+	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange),
 };
 
 static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
@@ -2093,6 +2095,61 @@ static int navi10_run_btc(struct smu_context *smu)
 	return ret;
 }
 
+static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
+{
+	int result = 0;
+
+	if (!enable)
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+	else
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+
+	return result;
+}
+
+static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
+{
+	uint32_t uclk_count, uclk_min, uclk_max;
+	uint32_t smu_version;
+	int ret = 0;
+
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret)
+		return ret;
+
+	/* This workaround is available only for 42.50 or later SMC firmwares */
+	if (smu_version < 0x2A3200)
+		return 0;
+
+	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
+	if (ret)
+		return ret;
+
+	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+	if (ret)
+		return ret;
+
+	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+	if (ret)
+		return ret;
+
+	/* Force UCLK out of the highest DPM */
+	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
+	if (ret)
+		return ret;
+
+	/* Revert the UCLK Hardmax */
+	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
+	if (ret)
+		return ret;
+
+	/*
+	 * In this case, SMU already disabled dummy pstate during enablement
+	 * of UCLK DPM, so we have to re-enable it.
+	 */
+	return navi10_dummy_pstate_control(smu, true);
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.tables_init = navi10_tables_init,
 	.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -2187,6 +2244,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.od_edit_dpm_table = navi10_od_edit_dpm_table,
 	.get_pptable_power_limit = navi10_get_pptable_power_limit,
 	.run_btc = navi10_run_btc,
+	.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 783319ec8bf9..7bd200ffcda8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -207,4 +207,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
 		((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
 
+#define smu_disable_umc_cdr_12gbps_workaround(smu) \
+	((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+
 #endif
commit f38abc15d157b7b31fa7f651dc8bf92858c963f8
Author: Yong Zhao <Yong.Zhao at amd.com>
Date:   Wed Jan 29 19:55:47 2020 -0500

    drm/amdkfd: Fix a bug in SDMA RLC queue counting under HWS mode
    
    The sdma_queue_count increment should be done before
    execute_queues_cpsch(), which calls pm_calc_rlib_size() where
    sdma_queue_count is used to calculate whether over_subscription is
    triggered.
    
    With the previous code, when an SDMA queue is created,
    compute_queue_count in pm_calc_rlib_size() is one more than the
    actual compute queue number, because the queue_count has been
    incremented while sdma_queue_count has not. This patch fixes that.
    
    Signed-off-by: Yong Zhao <Yong.Zhao at amd.com>
    Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
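
The defect is a plain increment-after-use ordering problem. A userspace sketch of the same pattern, with calc_compute_count() as a hypothetical stand-in for the queue arithmetic inside pm_calc_rlib_size():

    #include <stdio.h>

    static int queue_count;
    static int sdma_queue_count;

    /* Stand-in consumer: compute queues = all queues - SDMA queues. */
    static int calc_compute_count(void)
    {
        return queue_count - sdma_queue_count;
    }

    int main(void)
    {
        /* Buggy order: the consumer runs before sdma_queue_count is
         * bumped, so one SDMA queue is miscounted as a compute queue. */
        queue_count++;
        printf("buggy: %d\n", calc_compute_count());    /* 1, should be 0 */
        sdma_queue_count++;

        /* Fixed order: bump sdma_queue_count first, then consume. */
        queue_count = sdma_queue_count = 0;
        sdma_queue_count++;
        queue_count++;
        printf("fixed: %d\n", calc_compute_count());    /* 0 */
        return 0;
    }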

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2870553a2ce0..80d22bf702e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1237,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
+
+	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+		dqm->sdma_queue_count++;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+		dqm->xgmi_sdma_queue_count++;
+
 	if (q->properties.is_active) {
 		dqm->queue_count++;
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
 	 * type or whether the queue is active.
commit c6a228be7f440ad5c341b3039f6033d4a8f070a4
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Thu Jan 30 13:29:05 2020 -0500

    drm/amd/display: Only enable cursor on pipes that need it
    
    [Why]
    In the current code we're essentially drawing the cursor on every pipe
    that contains it. This only works when the planes have the same
    src-to-dest rect scaling; otherwise we get a "double cursor", where
    one cursor is incorrectly filtered and offset from the real position.
    
    [How]
    Without dedicated cursor planes on DCN we require at least one pipe
    that matches the scaling of the current timing.
    
    This is an optimization and workaround for the most common case where
    the top-most plane is not scaled but the bottom-most plane is scaled.
    
    Whenever a pipe has a parent pipe in the blending tree whose recout
    fully contains the current pipe we can disable the pipe.
    
    This only applies when the pipe is actually visible of course.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
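
The test added in the hunk below boils down to rectangle containment. Distilled into a standalone sketch (struct rect mirrors the x/y/width/height fields the patch reads; the helper name is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int x, y, width, height; };

    /* r1 is fully contained in r2 when every edge of r1 lies on or inside
     * the corresponding edge of r2 -- the condition under which the patch
     * lets a lower pipe drop its cursor. */
    static bool rect_contains(const struct rect *r2, const struct rect *r1)
    {
        return r1->x >= r2->x && r1->y >= r2->y &&
               r1->x + r1->width  <= r2->x + r2->width &&
               r1->y + r1->height <= r2->y + r2->height;
    }

    int main(void)
    {
        struct rect inner = { 100, 100,  800,  600 };
        struct rect outer = {   0,   0, 1920, 1080 };

        printf("%d\n", rect_contains(&outer, &inner));  /* prints 1 */
        return 0;
    }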

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f2127afb37b2..1008ac8a0f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2911,6 +2911,33 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 	hubbub->funcs->update_dchub(hubbub, dh_data);
 }
 
+static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+	struct pipe_ctx *test_pipe;
+	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
+	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+
+	/**
+	 * Disable the cursor if there's another pipe above this with a
+	 * plane that contains this pipe's viewport to prevent double cursor
+	 * and incorrect scaling artifacts.
+	 */
+	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+	     test_pipe = test_pipe->top_pipe) {
+		if (!test_pipe->plane_state->visible)
+			continue;
+
+		r2 = &test_pipe->plane_res.scl_data.recout;
+		r2_r = r2->x + r2->width;
+		r2_b = r2->y + r2->height;
+
+		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
+			return true;
+	}
+
+	return false;
+}
+
 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2956,6 +2983,9 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
 		pos_cpy.enable = false;
 
+	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+		pos_cpy.enable = false;
+
 	// Swap axis and mirror horizontally
 	if (param.rotation == ROTATION_ANGLE_90) {
 		uint32_t temp_x = pos_cpy.x;
commit 137c4ba7163ad9d5696b9fde78b1c0898a9c115b
Author: Ben Skeggs <bskeggs at redhat.com>
Date:   Mon Feb 3 03:37:07 2020 -0500

    drm/nouveau/kms/gv100-: avoid sending a core update until the first modeset
    
    The OR routing logic in NVKM does not expect to receive supervisor
    interrupts until the DD has provided consistent information on the
    ORs it's using and the EVO/NVD assembly state to match.
    
    The combination of changing window ownership + core channel update
    during display init triggered a situation on some systems where we'd
    disconnect an OR from the pad it was meant to still be driving.
    
    Signed-off-by: Ben Skeggs <bskeggs at redhat.com>

diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index f414171e40b4..3b36dc8d36b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -90,7 +90,7 @@ corec37d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
 	u32 *push, i;
-	if ((push = evo_wait(&core->chan, 2 + 5 * windows + 2))) {
+	if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
 		evo_mthd(push, 0x0208, 1);
 		evo_data(push, core->chan.sync.handle);
 		for (i = 0; i < windows; i++) {
@@ -100,8 +100,6 @@ corec37d_init(struct nv50_core *core)
 			evo_mthd(push, 0x1010 + (i * 0x080), 1);
 			evo_data(push, 0x00127fff);
 		}
-		evo_mthd(push, 0x0200, 1);
-		evo_data(push, 0x00000001);
 		evo_kick(push, &core->chan);
 		core->assign_windows = true;
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index b540606ac052..147adcd60937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -27,7 +27,7 @@ corec57d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
 	u32 *push, i;
-	if ((push = evo_wait(&core->chan, 2 + 5 * windows + 2))) {
+	if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
 		evo_mthd(push, 0x0208, 1);
 		evo_data(push, core->chan.sync.handle);
 		for (i = 0; i < windows; i++) {
@@ -37,8 +37,6 @@ corec57d_init(struct nv50_core *core)
 			evo_mthd(push, 0x1010 + (i * 0x080), 1);
 			evo_data(push, 0x00117fff);
 		}
-		evo_mthd(push, 0x0200, 1);
-		evo_data(push, 0x00000001);
 		evo_kick(push, &core->chan);
 		core->assign_windows = true;
 	}
commit 5bb88d07948b6779cb783ec0f08b4c1474d592dd
Author: Ben Skeggs <bskeggs at redhat.com>
Date:   Mon Feb 3 03:36:30 2020 -0500

    drm/nouveau/kms/gv100-: move window ownership setup into modesetting path
    
    For various complicated reasons, we need to avoid sending a core update
    method during display init, something we've been required to do on
    GV100 and up because we've been assigning windows to heads there and
    the HW is rather picky about when that's allowed.
    
    This moves window assignment into the modesetting path at a point where
    it's much safer to send our first update methods to NVDisplay.
    
    Signed-off-by: Ben Skeggs <bskeggs at redhat.com>

diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index df8336b593f7..ff94f3f6f264 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -6,6 +6,7 @@
 struct nv50_core {
 	const struct nv50_core_func *func;
 	struct nv50_dmac chan;
+	bool assign_windows;
 };
 
 int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
@@ -18,6 +19,10 @@ struct nv50_core_func {
 			      struct nvif_device *);
 	void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
 
+	struct {
+		void (*owner)(struct nv50_core *);
+	} wndw;
+
 	const struct nv50_head_func *head;
 	const struct nv50_outp_func {
 		void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
@@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
 void corec37d_update(struct nv50_core *, u32 *, bool);
+void corec37d_wndw_owner(struct nv50_core *);
 extern const struct nv50_outp_func sorc37d;
 
 int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 40d9b654ab8c..f414171e40b4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,6 +24,20 @@
 
 #include <nouveau_bo.h>
 
+void
+corec37d_wndw_owner(struct nv50_core *core)
+{
+	const u32 windows = 8; /*XXX*/
+	u32 *push, i;
+	if ((push = evo_wait(&core->chan, 2 * windows))) {
+		for (i = 0; i < windows; i++) {
+			evo_mthd(push, 0x1000 + (i * 0x080), 1);
+			evo_data(push, i >> 1);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
 void
 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
@@ -76,12 +90,11 @@ corec37d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
 	u32 *push, i;
-	if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+	if ((push = evo_wait(&core->chan, 2 + 5 * windows + 2))) {
 		evo_mthd(push, 0x0208, 1);
 		evo_data(push, core->chan.sync.handle);
 		for (i = 0; i < windows; i++) {
-			evo_mthd(push, 0x1000 + (i * 0x080), 3);
-			evo_data(push, i >> 1);
+			evo_mthd(push, 0x1004 + (i * 0x080), 2);
 			evo_data(push, 0x0000001f);
 			evo_data(push, 0x00000000);
 			evo_mthd(push, 0x1010 + (i * 0x080), 1);
@@ -90,6 +103,7 @@ corec37d_init(struct nv50_core *core)
 		evo_mthd(push, 0x0200, 1);
 		evo_data(push, 0x00000001);
 		evo_kick(push, &core->chan);
+		core->assign_windows = true;
 	}
 }
 
@@ -99,6 +113,7 @@ corec37d = {
 	.ntfy_init = corec37d_ntfy_init,
 	.ntfy_wait_done = corec37d_ntfy_wait_done,
 	.update = corec37d_update,
+	.wndw.owner = corec37d_wndw_owner,
 	.head = &headc37d,
 	.sor = &sorc37d,
 };
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index b606d68cda10..b540606ac052 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -27,12 +27,11 @@ corec57d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
 	u32 *push, i;
-	if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+	if ((push = evo_wait(&core->chan, 2 + 5 * windows + 2))) {
 		evo_mthd(push, 0x0208, 1);
 		evo_data(push, core->chan.sync.handle);
 		for (i = 0; i < windows; i++) {
-			evo_mthd(push, 0x1000 + (i * 0x080), 3);
-			evo_data(push, i >> 1);
+			evo_mthd(push, 0x1004 + (i * 0x080), 2);
 			evo_data(push, 0x0000000f);
 			evo_data(push, 0x00000000);
 			evo_mthd(push, 0x1010 + (i * 0x080), 1);
@@ -41,6 +40,7 @@ corec57d_init(struct nv50_core *core)
 		evo_mthd(push, 0x0200, 1);
 		evo_data(push, 0x00000001);
 		evo_kick(push, &core->chan);
+		core->assign_windows = true;
 	}
 }
 
@@ -50,6 +50,7 @@ corec57d = {
 	.ntfy_init = corec37d_ntfy_init,
 	.ntfy_wait_done = corec37d_ntfy_wait_done,
 	.update = corec37d_update,
+	.wndw.owner = corec37d_wndw_owner,
 	.head = &headc57d,
 	.sor = &sorc37d,
 };
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 2f123082c85d..a3dc2ba19fb2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1933,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_atom *atom = nv50_atom(state);
+	struct nv50_core *core = disp->core;
 	struct nv50_outp_atom *outp, *outt;
 	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
 	int i;
@@ -2051,6 +2052,21 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	}
 
+	/* Update window->head assignment.
+	 *
+	 * This has to happen in an update that's not interlocked with
+	 * any window channels to avoid hitting HW error checks.
+	 *
+	 *TODO: Proper handling of window ownership (Turing apparently
+	 *      supports non-fixed mappings).
+	 */
+	if (core->assign_windows) {
+		core->func->wndw.owner(core);
+		core->func->update(core, interlock, false);
+		core->assign_windows = false;
+		interlock[NV50_DISP_INTERLOCK_CORE] = 0;
+	}
+
 	/* Update plane(s). */
 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
commit 58ae5284f663f0856a13bc6caad93a0fe49d7cb4
Author: Ben Skeggs <bskeggs at redhat.com>
Date:   Mon Feb 3 01:58:45 2020 -0500

    drm/nouveau/disp/gv100-: halt NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_ERROR storms
    
    Signed-off-by: Ben Skeggs <bskeggs at redhat.com>

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 3aa2cc3af1e2..c1032527f791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -155,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
 	if (stat & 0x00000008)
 		stat &= ~0x00000008;
 
+	if (stat & 0x00000080) {
+		u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
+		nvkm_warn(subdev, "error %08x\n", error);
+		stat &= ~0x00000080;
+	}
+
 	if (stat & 0x00000100) {
 		unsigned long wndws = nvkm_rd32(device, 0x611858);
 		unsigned long other = nvkm_rd32(device, 0x61185c);
commit c472a0b0a1fd1688157b4ad6efc1c3fb8e571a53
Author: Thierry Reding <treding at nvidia.com>
Date:   Fri Jan 31 17:59:10 2020 +0100

    drm/tegra: sor: Initialize runtime PM before use
    
    Commit fd67e9c6ed5a ("drm/tegra: Do not implement runtime PM") replaced
    the generic runtime PM usage by a host1x bus-specific implementation in
    order to work around some assumptions baked into runtime PM that are in
    conflict with the requirements in the Tegra DRM driver.
    
    Unfortunately the new runtime PM callbacks are not set up yet at the
    time when the SOR driver first needs to resume the device to register
    the SOR pad clock, and register accesses will cause the system to hang.
    
    Note that this only happens on Tegra124 and Tegra210 because those are
    the only SoCs where the SOR pad clock is registered from the SOR driver.
    Later generations use a SOR pad clock provided by the BPMP.
    
    Fix this by moving the registration of the SOR pad clock after the
    host1x client has been registered. That's somewhat suboptimal because
    this could potentially, though it's very unlikely, cause the Tegra DRM
    to be probed if the SOR happens to be the last subdevice to register,
    only to be immediately removed again if the SOR pad output clock fails
    to register. That's just a minor annoyance, though, and doesn't justify
    implementing a workaround.
    
    Fixes: fd67e9c6ed5a ("drm/tegra: Do not implement runtime PM")
    Signed-off-by: Thierry Reding <treding at nvidia.com>
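
These SOR probe fixes restore the usual kernel idiom of unwinding in reverse order through cascading goto labels. A minimal userspace sketch of that idiom (all function names are hypothetical stand-ins, not driver API):

    #include <stdio.h>

    static int  enable_pm(void)         { puts("enable");     return 0; }
    static void disable_pm(void)        { puts("disable"); }
    static int  register_client(void)   { puts("register");   return 0; }
    static void unregister_client(void) { puts("unregister"); }
    static int  register_clock(void)    { puts("clock");      return -1; } /* fails */

    static int probe(void)
    {
        int err;

        err = enable_pm();
        if (err)
            return err;

        err = register_client();
        if (err)
            goto disable;

        err = register_clock();
        if (err)
            goto unregister;    /* unwind in reverse order of setup */

        return 0;

    unregister:
        unregister_client();
    disable:
        disable_pm();
        return err;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }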

diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e79dd65c1b54..a9222841862e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3914,6 +3914,17 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sor);
 	pm_runtime_enable(&pdev->dev);
 
+	INIT_LIST_HEAD(&sor->client.list);
+	sor->client.ops = &sor_client_ops;
+	sor->client.dev = &pdev->dev;
+
+	err = host1x_client_register(&sor->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		goto rpm_disable;
+	}
+
 	/*
 	 * On Tegra210 and earlier, provide our own implementation for the
 	 * pad output clock.
@@ -3925,13 +3936,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 				      sor->index);
 		if (!name) {
 			err = -ENOMEM;
-			goto rpm_disable;
+			goto unregister;
 		}
 
 		err = host1x_client_resume(&sor->client);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to resume: %d\n", err);
-			goto rpm_disable;
+			goto unregister;
 		}
 
 		sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3940,24 +3951,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
 	if (IS_ERR(sor->clk_pad)) {
 		err = PTR_ERR(sor->clk_pad);
-		dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+		dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
 			err);
-		goto rpm_disable;
-	}
-
-	INIT_LIST_HEAD(&sor->client.list);
-	sor->client.ops = &sor_client_ops;
-	sor->client.dev = &pdev->dev;
-
-	err = host1x_client_register(&sor->client);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-			err);
-		goto rpm_disable;
+		goto unregister;
 	}
 
 	return 0;
 
+unregister:
+	host1x_client_unregister(&sor->client);
 rpm_disable:
 	pm_runtime_disable(&pdev->dev);
 remove:
commit ad2139cb80a7a9afbfe428d0448d351a84e22ee6
Author: Thierry Reding <treding at nvidia.com>
Date:   Fri Jan 31 17:59:09 2020 +0100

    drm/tegra: sor: Disable runtime PM on probe failure
    
    If the driver fails to probe, make sure to disable runtime PM again.
    
    While at it, make the cleanup code in ->remove() symmetric.
    
    Signed-off-by: Thierry Reding <treding at nvidia.com>

diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 30c96b15d7a3..e79dd65c1b54 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3925,13 +3925,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 				      sor->index);
 		if (!name) {
 			err = -ENOMEM;
-			goto remove;
+			goto rpm_disable;
 		}
 
 		err = host1x_client_resume(&sor->client);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to resume: %d\n", err);
-			goto remove;
+			goto rpm_disable;
 		}
 
 		sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3942,7 +3942,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		err = PTR_ERR(sor->clk_pad);
 		dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
 			err);
-		goto remove;
+		goto rpm_disable;
 	}
 
 	INIT_LIST_HEAD(&sor->client.list);
@@ -3953,11 +3953,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		goto remove;
+		goto rpm_disable;
 	}
 
 	return 0;
 
+rpm_disable:
+	pm_runtime_disable(&pdev->dev);
 remove:
 	if (sor->ops && sor->ops->remove)
 		sor->ops->remove(sor);
@@ -3971,8 +3973,6 @@ static int tegra_sor_remove(struct platform_device *pdev)
 	struct tegra_sor *sor = platform_get_drvdata(pdev);
 	int err;
 
-	pm_runtime_disable(&pdev->dev);
-
 	err = host1x_client_unregister(&sor->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -3980,6 +3980,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
 		return err;
 	}
 
+	pm_runtime_disable(&pdev->dev);
+
 	if (sor->ops && sor->ops->remove) {
 		err = sor->ops->remove(sor);
 		if (err < 0)
commit a5127a2dbe95dd72b6a21c98dee0857511f30357
Author: Thierry Reding <treding at nvidia.com>
Date:   Fri Jan 31 17:59:08 2020 +0100

    drm/tegra: sor: Suspend on clock registration failure
    
    Make sure the SOR module is suspended after we fail to register the SOR
    pad output clock.
    
    Signed-off-by: Thierry Reding <treding at nvidia.com>

diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f884185c5e9f..30c96b15d7a3 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3921,15 +3921,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	if (!sor->clk_pad) {
 		char *name;
 
-		err = host1x_client_resume(&sor->client);
-		if (err < 0) {
-			dev_err(sor->dev, "failed to resume: %d\n", err);
+		name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout",
+				      sor->index);
+		if (!name) {
+			err = -ENOMEM;
 			goto remove;
 		}
 
-		name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
-		if (!name) {
-			err = -ENOMEM;
+		err = host1x_client_resume(&sor->client);
+		if (err < 0) {
+			dev_err(sor->dev, "failed to resume: %d\n", err);
 			goto remove;
 		}
 
commit e0d5322c29ef9f19a445c4960e9f89b4a85571bc
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Sat Jan 25 11:30:25 2020 -0500

    drm/amdgpu/navi10: add mclk to navi10_get_clock_by_type_with_latency
    
    Doesn't seem to be used, but add it just in case.
    
    Reviewed-by: Matt Coffin <mcoffin13 at gmail.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 93c66c69ca28..f1b27fc20c19 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -949,6 +949,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
 	case SMU_GFXCLK:
 	case SMU_DCEFCLK:
 	case SMU_SOCCLK:
+	case SMU_MCLK:
+	case SMU_UCLK:
 		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
 		if (ret)
 			return ret;
commit 968162204adf5f2292214356978357e9dcd35de1
Author: Nathan Chancellor <natechancellor at gmail.com>
Date:   Wed Jan 29 18:24:35 2020 -0700

    drm/amdgpu: Fix implicit enum conversion in gfx_v9_4_ras_error_inject
    
    Clang warns:
    
    ../drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c:967:35: warning: implicit
    conversion from enumeration type 'enum amdgpu_ras_block' to different
    enumeration type 'enum ta_ras_block' [-Wenum-conversion]
            block_info.block_id = info->head.block;
                                ~ ~~~~~~~~~~~^~~~~
    1 warning generated.
    
    Use the function added in commit 828cfa29093f ("drm/amdgpu: Fix amdgpu
    ras to ta enums conversion") that handles this conversion explicitly.
    
    Fixes: 4c461d89db4f ("drm/amdgpu: add RAS support for the gfx block of Arcturus")
    Link: https://github.com/ClangBuiltLinux/linux/issues/849
    Signed-off-by: Nathan Chancellor <natechancellor at gmail.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
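
Clang's -Wenum-conversion fires on any implicit assignment between distinct enum types, and the fix routes the value through an explicit mapping function instead. A minimal reproduction of both, using illustrative enum names rather than the real amdgpu/ta ones:

    enum ras_block { RAS_GFX, RAS_SDMA };
    enum ta_block  { TA_GFX,  TA_SDMA };

    /* Explicit conversion, in the spirit of amdgpu_ras_block_to_ta(). */
    static enum ta_block ras_to_ta(enum ras_block b)
    {
        switch (b) {
        case RAS_SDMA:
            return TA_SDMA;
        case RAS_GFX:
        default:
            return TA_GFX;
        }
    }

    int main(void)
    {
        enum ras_block src = RAS_SDMA;
        enum ta_block dst;

        /* dst = src;  <- clang would warn: -Wenum-conversion */
        dst = ras_to_ta(src);   /* explicit and warning-free */
        return dst == TA_SDMA ? 0 : 1;
    }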

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index e19d275f3f7d..f099f13d7f1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -964,7 +964,7 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return -EINVAL;
 
-	block_info.block_id = info->head.block;
+	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
 	block_info.sub_block_index = info->head.sub_block_index;
 	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
 	block_info.address = info->address;
commit eca31a4ac89bd5af26eccc6c32acbee396950b30
Author: Daniel Vetter <daniel.vetter at ffwll.ch>
Date:   Wed Jan 29 09:09:05 2020 +0100

    radeon: completely remove lut leftovers
    
    This is an oversight from
    
    commit 42585395ebc1034a98937702849669f17eadb35f
    Author: Peter Rosin <peda at axentia.se>
    Date:   Thu Jul 13 18:25:36 2017 +0200
    
        drm: radeon: remove dead code and pointless local lut storage
    
    v2: Also remove leftover local variable.
    
    Cc: Peter Rosin <peda at axentia.se>
    Cc: Alex Deucher <alexander.deucher at amd.com>
    Cc: Michel Dänzer <michel.daenzer at amd.com>
    Reviewed-by: Michel Dänzer <mdaenzer at redhat.com>
    Signed-off-by: Daniel Vetter <daniel.vetter at intel.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 82fa7ab6d35c..d07c7db0c815 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -674,7 +674,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_crtc *radeon_crtc;
-	int i;
 
 	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
 	if (radeon_crtc == NULL)
@@ -703,12 +702,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 	radeon_crtc->mode_set.num_connectors = 0;
 #endif
 
-	for (i = 0; i < 256; i++) {
-		radeon_crtc->lut_r[i] = i << 2;
-		radeon_crtc->lut_g[i] = i << 2;
-		radeon_crtc->lut_b[i] = i << 2;
-	}
-
 	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
 		radeon_atombios_init_crtc(dev, radeon_crtc);
 	else
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index fd470d6bf3f4..96565171d13e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -327,7 +327,6 @@ enum radeon_flip_status {
 struct radeon_crtc {
 	struct drm_crtc base;
 	int crtc_id;
-	u16 lut_r[256], lut_g[256], lut_b[256];
 	bool enabled;
 	bool can_tile;
 	bool cursor_out_of_bounds;
commit ded58c7bbbf3b52c018db99fd4d530c3448a0c83
Author: Zhan Liu <zhan.liu at amd.com>
Date:   Tue Jan 28 16:38:53 2020 -0500

    drm/amd/display: Move drm_dp_mst_atomic_check() to the front of dc_validate_global_state()
    
    [Why]
    Need to do the atomic check first, then validate the global state.
    Otherwise, when both MST and HDMI displays are connected and a bad
    mode is set via xrandr, the system will hang.
    
    [How]
    Move drm_dp_mst_atomic_check() to the front of
    dc_validate_global_state().
    
    Signed-off-by: Zhan Liu <zhan.liu at amd.com>
    Reviewed-by: Mikita Lipski <mikita.lipski at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2ac349849081..279541517a99 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8167,6 +8167,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;
 #endif
 
+		/*
+		 * Perform validation of MST topology in the state:
+		 * We need to perform MST atomic check before calling
+		 * dc_validate_global_state(), or there is a chance
+		 * to get stuck in an infinite loop and hang eventually.
+		 */
+		ret = drm_dp_mst_atomic_check(state);
+		if (ret)
+			goto fail;
+
 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
 			ret = -EINVAL;
 			goto fail;
@@ -8195,10 +8205,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				dc_retain_state(old_dm_state->context);
 		}
 	}
-	/* Perform validation of MST topology in the state*/
-	ret = drm_dp_mst_atomic_check(state);
-	if (ret)
-		goto fail;
 
 	/* Store the overall update type for use later in atomic check. */
 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
commit ec3d65082d7dabad6fa8f66a8ef166f2d522d6b2
Author: Daniel Vetter <daniel.vetter at ffwll.ch>
Date:   Tue Jan 28 17:09:52 2020 +0100

    radeon: insert 10ms sleep in dce5_crtc_load_lut
    
    Per at least one tester this is enough magic to recover from the
    regression introduced for some people (but not all) in
    
    commit b8e2b0199cc377617dc238f5106352c06dcd3fa2
    Author: Peter Rosin <peda at axentia.se>
    Date:   Tue Jul 4 12:36:57 2017 +0200
    
        drm/fb-helper: factor out pseudo-palette
    
    which for radeon had the side-effect of refactoring out a seemingly
    redundant writing of the color palette.
    
    10ms in a fairly slow modeset path feels like an acceptable form of
    duct-tape, so maybe worth a shot and see what sticks.
    
    Cc: Alex Deucher <alexander.deucher at amd.com>
    Cc: Michel Dänzer <michel.daenzer at amd.com>
    References: https://bugzilla.kernel.org/show_bug.cgi?id=198123
    Signed-off-by: Daniel Vetter <daniel.vetter at intel.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 856526cb2caf..82fa7ab6d35c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
 
+	msleep(10);
+
 	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
 	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
 		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
commit 2a239597dfc65fb097a3d7ddcd01066745fea069
Author: Colin Ian King <colin.king at canonical.com>
Date:   Tue Jan 28 11:28:27 2020 +0000

    drm/amd/display: fix spelling mistake link_integiry_check -> link_integrity_check
    
    There is a spelling mistake on the struct field name link_integiry_check,
    fix this by renaming it.
    
    Signed-off-by: Colin Ian King <colin.king at canonical.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index f98d3d9ecb6d..af78e4f1be68 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -63,7 +63,7 @@ struct mod_hdcp_transition_input_hdcp1 {
 	uint8_t hdcp_capable_dp;
 	uint8_t binfo_read_dp;
 	uint8_t r0p_available_dp;
-	uint8_t link_integiry_check;
+	uint8_t link_integrity_check;
 	uint8_t reauth_request_check;
 	uint8_t stream_encryption_dp;
 };
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 04845e43df15..37670db64855 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -283,8 +283,8 @@ static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
 				hdcp, "bstatus_read"))
 			goto out;
 		if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-				&input->link_integiry_check, &status,
-				hdcp, "link_integiry_check"))
+				&input->link_integrity_check, &status,
+				hdcp, "link_integrity_check"))
 			goto out;
 		if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
 				&input->reauth_request_check, &status,
@@ -431,8 +431,8 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
 			hdcp, "bstatus_read"))
 		goto out;
 	if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-			&input->link_integiry_check, &status,
-			hdcp, "link_integiry_check"))
+			&input->link_integrity_check, &status,
+			hdcp, "link_integrity_check"))
 		goto out;
 	if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
 			&input->reauth_request_check, &status,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 21ebc62bb9d9..76edcbe51f71 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -241,7 +241,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 		}
 		break;
 	case D1_A4_AUTHENTICATED:
-		if (input->link_integiry_check != PASS ||
+		if (input->link_integrity_check != PASS ||
 				input->reauth_request_check != PASS) {
 			/* 1A-07: restart hdcp on a link integrity failure */
 			fail_and_restart_in_ms(0, &status, output);
@@ -249,7 +249,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 		}
 		break;
 	case D1_A6_WAIT_FOR_READY:
-		if (input->link_integiry_check == FAIL ||
+		if (input->link_integrity_check == FAIL ||
 				input->reauth_request_check == FAIL) {
 			fail_and_restart_in_ms(0, &status, output);
 			break;
commit 7044cb6c20776a1d546f08c332601bb6005388a2
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Tue Jan 28 15:42:27 2020 +1100

    amdgpu: using vmalloc requires including vmalloc.h
    
    Fixes: 240c811ccde4 ("drm/amdgpu: fix VRAM partially encroached issue in GDDR6 memory training(V2)")
    Signed-off-by: Stephen Rothwell <sfr at canb.auug.org.au>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index ac173d2eb809..0829188c1a5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -22,6 +22,7 @@
 
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
commit 977f7e1068be60f3cac7d80ab18692341f3b7fc5
Author: Nirmoy Das <nirmoy.das at amd.com>
Date:   Tue Jan 21 15:53:53 2020 +0100

    drm/amdgpu: allocate entities on demand
    
    Currently we pre-allocate entities and fences for all the HW IPs on
    context creation, some of which might never be used.
    
    This patch avoids that waste by creating an entity only when it is
    needed.
    
    v2: allocate memory for entity and fences together
    
    Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
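
The "allocate memory for entity and fences together" note refers to the C flexible array member pattern visible in the amdgpu_ctx.h hunk below. A standalone sketch of the single-allocation trick (field names shortened; note the kernel code sizes the allocation with offsetof(..., fences[n]), a GNU extension when n is not a constant):

    #include <stdio.h>
    #include <stdlib.h>

    struct entity {
        unsigned long sequence;
        void *fences[];     /* flexible array member, sized at alloc time */
    };

    int main(void)
    {
        size_t num_fences = 16;     /* stand-in for amdgpu_sched_jobs */
        struct entity *e;

        /* One allocation covers the struct and its trailing array. */
        e = calloc(1, sizeof(*e) + num_fences * sizeof(e->fences[0]));
        if (!e)
            return 1;

        e->sequence = 1;
        printf("%zu bytes\n", sizeof(*e) + num_fences * sizeof(e->fences[0]));
        free(e);
        return 0;
    }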

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 05c2af61e7de..94a6c42f29ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
 };
 
-static int amdgpu_ctx_total_num_entities(void)
-{
-	unsigned i, num_entities = 0;
-
-	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
-		num_entities += amdgpu_ctx_num_entities[i];
-
-	return num_entities;
-}
-
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 				      enum drm_sched_priority priority)
 {
+	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+		return -EINVAL;
+
 	/* NORMAL and below are accessible by everyone */
 	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
 		return 0;
@@ -68,64 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
-			   enum drm_sched_priority priority,
-			   struct drm_file *filp,
-			   struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
-	unsigned i, j;
+	struct amdgpu_device *adev = ctx->adev;
+	struct amdgpu_ctx_entity *entity;
+	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
+	unsigned num_scheds = 0;
+	enum drm_sched_priority priority;
 	int r;
 
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
-		return -EINVAL;
-
-	r = amdgpu_ctx_priority_permit(filp, priority);
-	if (r)
-		return r;
-
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->adev = adev;
-
-
-	ctx->entities[0] = kcalloc(num_entities,
-				   sizeof(struct amdgpu_ctx_entity),
-				   GFP_KERNEL);
-	if (!ctx->entities[0])
-		return -ENOMEM;
-
+	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
+			 GFP_KERNEL);
+	if (!entity)
+		return  -ENOMEM;
 
-	for (i = 0; i < num_entities; ++i) {
-		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
-
-		entity->sequence = 1;
-		entity->fences = kcalloc(amdgpu_sched_jobs,
-					 sizeof(struct dma_fence*), GFP_KERNEL);
-		if (!entity->fences) {
-			r = -ENOMEM;
-			goto error_cleanup_memory;
-		}
-	}
-	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
-		ctx->entities[i] = ctx->entities[i - 1] +
-			amdgpu_ctx_num_entities[i - 1];
-
-	kref_init(&ctx->refcount);
-	spin_lock_init(&ctx->ring_lock);
-	mutex_init(&ctx->lock);
-
-	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
-	ctx->reset_counter_query = ctx->reset_counter;
-	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
-	ctx->init_priority = priority;
-	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
-
-	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-		struct drm_gpu_scheduler **scheds;
-		struct drm_gpu_scheduler *sched;
-		unsigned num_scheds = 0;
-
-		switch (i) {
+	entity->sequence = 1;
+	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+				ctx->init_priority : ctx->override_priority;
+	switch (hw_ip) {
 		case AMDGPU_HW_IP_GFX:
 			sched = &adev->gfx.gfx_ring[0].sched;
 			scheds = &sched;
@@ -166,63 +119,90 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			scheds = adev->jpeg.jpeg_sched;
 			num_scheds =  adev->jpeg.num_jpeg_sched;
 			break;
-		}
-
-		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
-			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-						  priority, scheds,
-						  num_scheds, &ctx->guilty);
-		if (r)
-			goto error_cleanup_entities;
 	}
 
+	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+				  &ctx->guilty);
+	if (r)
+		goto error_free_entity;
+
+	ctx->entities[hw_ip][ring] = entity;
 	return 0;
 
-error_cleanup_entities:
-	for (i = 0; i < num_entities; ++i)
-		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
+error_free_entity:
+	kfree(entity);
 
-error_cleanup_memory:
-	for (i = 0; i < num_entities; ++i) {
-		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+	return r;
+}
 
-		kfree(entity->fences);
-		entity->fences = NULL;
-	}
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+			   enum drm_sched_priority priority,
+			   struct drm_file *filp,
+			   struct amdgpu_ctx *ctx)
+{
+	int r;
 
-	kfree(ctx->entities[0]);
-	ctx->entities[0] = NULL;
-	return r;
+	r = amdgpu_ctx_priority_permit(filp, priority);
+	if (r)
+		return r;
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->adev = adev;
+
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->ring_lock);
+	mutex_init(&ctx->lock);
+
+	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+	ctx->reset_counter_query = ctx->reset_counter;
+	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+	ctx->init_priority = priority;
+	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+
+	return 0;
+
+}
+
+static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
+
+	int i;
+
+	if (!entity)
+		return;
+
+	for (i = 0; i < amdgpu_sched_jobs; ++i)
+		dma_fence_put(entity->fences[i]);
+
+	kfree(entity);
 }
 
 static void amdgpu_ctx_fini(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
 	if (!adev)
 		return;
 
-	for (i = 0; i < num_entities; ++i) {
-		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
-
-		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			dma_fence_put(entity->fences[j]);
-
-		kfree(entity->fences);
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
+			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+			ctx->entities[i][j] = NULL;
+		}
 	}
 
-	kfree(ctx->entities[0]);
 	mutex_destroy(&ctx->lock);
-
 	kfree(ctx);
 }
 
 int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 			  u32 ring, struct drm_sched_entity **entity)
 {
+	int r;
+
 	if (hw_ip >= AMDGPU_HW_IP_NUM) {
 		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
 		return -EINVAL;
@@ -239,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 		return -EINVAL;
 	}
 
-	*entity = &ctx->entities[hw_ip][ring].entity;
+	if (ctx->entities[hw_ip][ring] == NULL) {
+		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
+		if (r)
+			return r;
+	}
+
+	*entity = &ctx->entities[hw_ip][ring]->entity;
 	return 0;
 }
 
@@ -279,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
-	unsigned num_entities;
-	u32 i;
+	u32 i, j;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+			if (!ctx->entities[i][j])
+				continue;
 
-	num_entities = amdgpu_ctx_total_num_entities();
-	for (i = 0; i < num_entities; i++)
-		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
+			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
+		}
+	}
 
 	amdgpu_ctx_fini(ref);
 }
@@ -516,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	enum drm_sched_priority ctx_prio;
-	unsigned i;
+	unsigned i, j;
 
 	ctx->override_priority = priority;
 
 	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+			struct drm_sched_entity *entity;
 
-	for (i = 0; i < num_entities; i++) {
-		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;
+			if (!ctx->entities[i][j])
+				continue;
 
-		drm_sched_entity_set_priority(entity, ctx_prio);
+			entity = &ctx->entities[i][j]->entity;
+			drm_sched_entity_set_priority(entity, ctx_prio);
+		}
 	}
 }
 
@@ -564,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
-	uint32_t id, i;
+	uint32_t id, i, j;
 
 	idp = &mgr->ctx_handles;
 
 	mutex_lock(&mgr->lock);
 	idr_for_each_entry(idp, ctx, id) {
-		for (i = 0; i < num_entities; i++) {
-			struct drm_sched_entity *entity;
+		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+				struct drm_sched_entity *entity;
+
+				if (!ctx->entities[i][j])
+					continue;
 
-			entity = &ctx->entities[0][i].entity;
-			timeout = drm_sched_entity_flush(entity, timeout);
+				entity = &ctx->entities[i][j]->entity;
+				timeout = drm_sched_entity_flush(entity, timeout);
+			}
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -586,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
-	uint32_t id, i;
+	uint32_t id, i, j;
 
 	idp = &mgr->ctx_handles;
 
@@ -599,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 		}
 
-		for (i = 0; i < num_entities; i++)
-			drm_sched_entity_fini(&ctx->entities[0][i].entity);
+		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+				struct drm_sched_entity *entity;
+
+				if (!ctx->entities[i][j])
+					continue;
+
+				entity = &ctx->entities[i][j]->entity;
+				drm_sched_entity_fini(entity);
+			}
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index a6cd9d4b078c..de490f183af2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -29,10 +29,12 @@ struct drm_device;
 struct drm_file;
 struct amdgpu_fpriv;
 
+#define AMDGPU_MAX_ENTITY_NUM 4
+
 struct amdgpu_ctx_entity {
 	uint64_t		sequence;
-	struct dma_fence	**fences;
 	struct drm_sched_entity	entity;
+	struct dma_fence	*fences[];
 };
 
 struct amdgpu_ctx {
@@ -42,7 +44,7 @@ struct amdgpu_ctx {
 	unsigned			reset_counter_query;
 	uint32_t			vram_lost_counter;
 	spinlock_t			ring_lock;
-	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
+	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
 	bool				preamble_presented;
 	enum drm_sched_priority		init_priority;
 	enum drm_sched_priority		override_priority;
commit 18c6b74e7cfc9a599917d9e98f9835de8208e19a
Author: Joseph Greathouse <Joseph.Greathouse at amd.com>
Date:   Mon Jan 27 16:08:11 2020 -0600

    drm/amdgpu: Enable DISABLE_BARRIER_WAITCNT for Arcturus
    
    In previous gfx9 parts, S_BARRIER shader instructions implicitly act
    as S_WAITCNT 0 instructions as well. This setting turns off that
    mechanism in Arcturus and beyond. With this, shaders must follow the
    ISA guide and insert explicit S_WAITCNT operations even after an
    S_BARRIER.
    
    v2: Fix patch title to list component
    
    Signed-off-by: Joseph Greathouse <Joseph.Greathouse at amd.com>
    Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
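
The SQ_CONFIG hunks below define the new bit as a shift/mask macro pair, and REG_SET_FIELD() combines them. A sketch of the usual clear-then-set semantics (the helper approximates what the macro does; treat the expansion as an assumption, not a copy of the real macro):

    #include <stdint.h>
    #include <stdio.h>

    #define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0
    #define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK   0x00000001L

    /* Clear the field, then OR in the new value at its offset. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                              uint32_t val)
    {
        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint32_t tmp = 0x00000100;  /* pretend value read from SQ_CONFIG */

        tmp = set_field(tmp, SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK,
                        SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT, 1);
        printf("0x%08X\n", tmp);    /* 0x00000101 */
        return 0;
    }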

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 51ec8a70660a..90f64b8bc358 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2441,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
 	}
 }
 
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	switch (adev->asic_type) {
+	case CHIP_ARCTURUS:
+		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+		tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+					DISABLE_BARRIER_WAITCNT, 1);
+		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+		break;
+	default:
+		break;
+	};
+}
+
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 {
 	u32 tmp;
@@ -2486,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 
 	gfx_v9_0_init_compute_vmid(adev);
 	gfx_v9_0_init_gds_vmid(adev);
+	gfx_v9_0_init_sq_config(adev);
 }
 
 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index c9e3f6d849a8..ea316d8dcb37 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -2060,7 +2060,8 @@
 
 // addressBlock: gc_sqdec
 //SQ_CONFIG
-#define SQ_CONFIG__UNUSED__SHIFT                                                                              0x0
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT                                                             0x0
+#define SQ_CONFIG__UNUSED__SHIFT                                                                              0x1
 #define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT                                                                   0x7
 #define SQ_CONFIG__DEBUG_EN__SHIFT                                                                            0x8
 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT                                                                  0x9
@@ -2079,7 +2080,8 @@
 #define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT                                                  0x1d
 #define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT                                                            0x1e
 #define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT                                                            0x1f
-#define SQ_CONFIG__UNUSED_MASK                                                                                0x0000007FL
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK                                                               0x00000001L
+#define SQ_CONFIG__UNUSED_MASK                                                                                0x0000007EL
 #define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK                                                                     0x00000080L
 #define SQ_CONFIG__DEBUG_EN_MASK                                                                              0x00000100L
 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK                                                                    0x00000200L
commit 269a0bf79b3346fbebe7f62aef516ce61c2f02d0
Author: Colin Ian King <colin.king at canonical.com>
Date:   Sat Jan 25 20:26:13 2020 +0000

    drm/amd/powerplay: fix spelling mistake "Attemp" -> "Attempt"
    
    There are several spelling mistakes in PP_ASSERT_WITH_CODE messages.
    Fix these.
    
    Signed-off-by: Colin Ian King <colin.king at canonical.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index a3915bfcce81..275dbf65f1a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -128,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
 	if (enable) {
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
 				return -EINVAL);
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
 				return -EINVAL);
 	} else {
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
 				return -EINVAL);
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
 				return -EINVAL);
 	}
 
@@ -158,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 
 	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
 			return -EINVAL);
 	smc_features_low = smu9_get_argument(hwmgr);
 
 	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
 			return -EINVAL);
 	smc_features_high = smu9_get_argument(hwmgr);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 0db57fb83d30..49e5ef3e3876 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -316,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
 	if (enable) {
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
 				return ret);
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
 				return ret);
 	} else {
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
 				return ret);
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
 				return ret);
 	}
 
@@ -347,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
 			return ret);
 	smc_features_low = vega20_get_argument(hwmgr);
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
 			return ret);
 	smc_features_high = vega20_get_argument(hwmgr);
 
commit 54f73df4cab18734b71a4325feb4749d334dc877
Author: Colin Ian King <colin.king at canonical.com>
Date:   Fri Jan 17 13:33:05 2020 +0000

    drm/amd/display: fix for-loop with incorrectly sized loop counter (v2)
    
    A for-loop is iterating from 0 up to 1000; however, the loop variable count
    is a u8 and hence not large enough.  Fix this by making count an int.
    Also remove the redundant initialization of count, since that value is never
    used, and add { } on the loop statement to make the loop block clearer.
    
    v2: drop useless else (Walter Harms)
    
    Addresses-Coverity: ("Operands don't affect result")
    Fixes: ed581a0ace44 ("drm/amd/display: wait for update when setting dpg test pattern")
    Signed-off-by: Colin Ian King <colin.king at canonical.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
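
    A minimal standalone sketch of the bug class (not the kernel code
    itself, and compilable on its own): a u8 wraps from 255 back to 0,
    so a bound of 1000 can never be reached and the loop condition can
    never become false -- exactly what Coverity flags.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t u8_count = 0;
            unsigned int i;

            /* 1000 increments leave 1000 % 256 = 232 in a u8, so a
             * condition like "u8_count < 1000" is always true and the
             * original loop could never terminate via its counter. */
            for (i = 0; i < 1000; i++)
                    u8_count++;

            printf("u8 after 1000 increments: %u\n", (unsigned)u8_count);
            return 0;
    }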

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index a53e8fed56f3..cb731c1d30b1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3680,7 +3680,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
 			struct pipe_ctx *odm_pipe;
 			enum controller_dp_color_space controller_color_space;
 			int opp_cnt = 1;
-			uint8_t count = 0;
+			int count;
 
 			switch (test_pattern_color_space) {
 			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3725,11 +3725,11 @@ static void set_crtc_test_pattern(struct dc_link *link,
 				width,
 				height);
 			/* wait for dpg to blank pixel data with test pattern */
-			for (count = 0; count < 1000; count++)
+			for (count = 0; count < 1000; count++) {
 				if (opp->funcs->dpg_is_blanked(opp))
 					break;
-				else
-					udelay(100);
+				udelay(100);
+			}
 		}
 	}
 	break;
commit 2cb44fb09305e23fe60dbfcccad3f8c4028749f1
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Mon Jan 27 14:35:10 2020 -0500

    drm/amdgpu: enable GPU reset by default on renoir
    
    Everything is in place.
    
    Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2f15e2f6d336..39cd545976b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3760,6 +3760,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_VEGA12:
 		case CHIP_RAVEN:
 		case CHIP_ARCTURUS:
+		case CHIP_RENOIR:
 		case CHIP_NAVI10:
 		case CHIP_NAVI14:
 		case CHIP_NAVI12:
commit 658c663947b04b1f8a2fd061a3ef767983c880e5
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Mon Jan 27 14:31:49 2020 -0500

    drm/amdgpu: enable GPU reset by default on Navi
    
    Has been working fine for a while.
    
    Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 990f8e64a4d5..2f15e2f6d336 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3760,6 +3760,9 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_VEGA12:
 		case CHIP_RAVEN:
 		case CHIP_ARCTURUS:
+		case CHIP_NAVI10:
+		case CHIP_NAVI14:
+		case CHIP_NAVI12:
 			break;
 		default:
 			goto disabled;
commit bb67bfd2e7101bf2ac5327b0b7a847cd9fb9723f
Author: Dor Askayo <dor.askayo at gmail.com>
Date:   Sat Jan 4 14:22:15 2020 +0200

    drm/amd/display: do not allocate display_mode_lib unnecessarily
    
    This allocation isn't required and can fail when resuming from suspend.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/issues/1009
    Signed-off-by: Dor Askayo <dor.askayo at gmail.com>
    Reviewed-by: Leo Li <sunpeng.li at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
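
    The pattern, sketched standalone below with hypothetical names, is
    to move a fallible allocation into the only branch that consumes
    it, so the remaining power-state paths can no longer fail on an
    allocation during resume.

    #include <stdlib.h>
    #include <string.h>

    struct mode_lib_sketch { char tables[4096]; };

    /* Hypothetical stand-in for dc_set_power_state(): only one power
     * state needs the scratch object, so only that branch allocates
     * and frees it. */
    static void set_power_state_sketch(int needs_clean_state)
    {
            struct mode_lib_sketch *dml;

            if (!needs_clean_state)
                    return;                 /* no allocation on this path */

            dml = calloc(1, sizeof(*dml));
            if (!dml)
                    return;                 /* failure is confined here */

            memset(dml->tables, 0, sizeof(dml->tables));
            free(dml);
    }

    int main(void) { set_power_state_sketch(1); return 0; }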

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6c797fac189d..04441dbcba76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2462,12 +2462,7 @@ void dc_set_power_state(
 	enum dc_acpi_cm_power_state power_state)
 {
 	struct kref refcount;
-	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
-						GFP_KERNEL);
-
-	ASSERT(dml);
-	if (!dml)
-		return;
+	struct display_mode_lib *dml;
 
 	switch (power_state) {
 	case DC_ACPI_CM_POWER_STATE_D0:
@@ -2490,6 +2485,12 @@ void dc_set_power_state(
 		 * clean state, and dc hw programming optimizations will not
 		 * cause any trouble.
 		 */
+		dml = kzalloc(sizeof(struct display_mode_lib),
+				GFP_KERNEL);
+
+		ASSERT(dml);
+		if (!dml)
+			return;
 
 		/* Preserve refcount */
 		refcount = dc->current_state->refcount;
@@ -2503,10 +2504,10 @@ void dc_set_power_state(
 		dc->current_state->refcount = refcount;
 		dc->current_state->bw_ctx.dml = *dml;
 
+		kfree(dml);
+
 		break;
 	}
-
-	kfree(dml);
 }
 
 void dc_resume(struct dc *dc)
commit 77171eade8faa713c1104b29ea7ff96915ecf9fb
Author: Christian König <christian.koenig at amd.com>
Date:   Thu Jan 16 14:06:59 2020 +0100

    drm/amdgpu: add coreboot workaround for KV/KB
    
    Coreboot seems to have a problem correctly setting up access to the stolen VRAM
    on KV/KB. Use the direct access only when necessary.
    
    Signed-off-by: Christian König <christian.koenig at amd.com>
    Reported-and-tested-by: Fredrik Bruhn <fredrik.bruhn at unibap.com>
    Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
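
    The guard reduces to "trust the MC-register view of stolen memory
    only when the PCI aperture really is smaller than VRAM"; a
    hypothetical standalone condensation:

    #include <stdbool.h>
    #include <stdio.h>

    /* Keep the PCI BAR view unless VRAM is larger than the aperture,
     * since the coreboot KV/KB setup of the direct path is suspect. */
    static bool use_direct_aperture(bool is_apu, unsigned long vram,
                                    unsigned long aper)
    {
            return is_apu && vram > aper;
    }

    int main(void)
    {
            printf("%d\n", use_direct_aperture(true, 1024, 2048)); /* 0 */
            printf("%d\n", use_direct_aperture(true, 2048, 1024)); /* 1 */
            return 0;
    }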

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 19d5b133e1d7..9da9596a3638 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-	if (adev->flags & AMD_IS_APU) {
+	if (adev->flags & AMD_IS_APU &&
+	    adev->gmc.real_vram_size > adev->gmc.aper_size) {
 		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
 		adev->gmc.aper_size = adev->gmc.real_vram_size;
 	}
commit da2f84ba73df11186cf23bec45d039f3b9162bd8
Author: Harry Wentland <harry.wentland at amd.com>
Date:   Tue Jan 21 16:29:54 2020 -0500

    Revert "drm/amd/display: Don't skip link training for empty dongle"
    
    This reverts commit 80adaebd2d411b7d6872a097634848a71eb13d20.
    
    [WHY]
    This change was working around a regression that occurred in this:
    commit 0301ccbaf67d ("drm/amd/display: DP Compliance 400.1.1 failure")
    
    With the fix to run verify_link_cap when the SINK_COUNT of
    dongles becomes non-zero, this change is no longer needed.
    
    Cc: Louis Li <Ching-shih.Li at amd.com>
    Cc: Wenjing Liu <Wenjing.Liu at amd.com>
    Cc: Hersen Wu <hersenxs.wu at amd.com>
    Cc: Eric Yang <Eric.Yang2 at amd.com>
    Reviewed-by: Wenjing Liu <Wenjing.Liu at amd.com>
    Signed-off-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index cc2e05003595..a09119c10d7c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -851,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link,
 				if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
 					same_dpcd = false;
 			}
-			/* Active dongle plug in without display or downstream unplug*/
+			/* Active dongle downstream unplug*/
 			if (link->type == dc_connection_active_dongle &&
 				link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
-				if (prev_sink != NULL) {
+				if (prev_sink != NULL)
 					/* Downstream unplug */
 					dc_sink_release(prev_sink);
-				} else {
-					/* Empty dongle plug in */
-					dp_verify_link_cap_with_retries(link,
-							&link->reported_link_cap,
-							LINK_TRAINING_MAX_VERIFY_RETRY);
-				}
 				return true;
 			}
 
commit 3eb6d7aca53d81ce888624f09cd44dc0302161e8
Author: Harry Wentland <harry.wentland at amd.com>
Date:   Tue Jan 21 16:12:45 2020 -0500

    drm/amd/display: Retrain dongles when SINK_COUNT becomes non-zero
    
    [WHY]
    Two years ago the patch referenced by the Fixes tag stopped running
    dp_verify_link_cap_with_retries during DP detection when the reason
    for the detection was a short-pulse interrupt. This effectively meant
    that we were no longer doing the verify_link_cap training on active
    dongles when their SINK_COUNT changed from 0 to 1.
    
    A year ago this was partly remedied with:
    commit 80adaebd2d41 ("drm/amd/display: Don't skip link training for empty dongle")
    
    This made sure that we trained the dongle on initial hotplug (without
    connected downstream devices).
    
    This would all be fine and dandy if it weren't for the fact that there are
    some dongles on the market that don't like link training when SINK_COUNT
    is 0. These dongles will in fact indicate a SINK_COUNT of 0 immediately
    after hotplug, even when a downstream device is connected, and then
    trigger a short-pulse interrupt indicating a SINK_COUNT change to 1.
    
    In order to play nicely, our policy needs to not link train an
    active DP dongle when SINK_COUNT is 0, but to ensure we train it when the
    SINK_COUNT changes to 1.
    
    [HOW]
    Call dp_verify_link_cap_with_retries on detection even when the detection
    is triggered from a short pulse interrupt.
    
    With this change we can also revert this commit which we'll do in a separate
    follow-up change:
    commit 80adaebd2d41 ("drm/amd/display: Don't skip link training for empty dongle")
    
    Fixes: 0301ccbaf67d ("drm/amd/display: DP Compliance 400.1.1 failure")
    Suggested-by: Louis Li <Ching-shih.Li at amd.com>
    Tested-by: Louis Li <Ching-shih.Li at amd.com>
    Cc: Wenjing Liu <Wenjing.Liu at amd.com>
    Cc: Hersen Wu <hersenxs.wu at amd.com>
    Cc: Eric Yang <Eric.Yang2 at amd.com>
    Reviewed-by: Wenjing Liu <Wenjing.Liu at amd.com>
    Signed-off-by: Harry Wentland <harry.wentland at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
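
    The resulting policy condenses to a predicate like the sketch below
    (function and parameter names are hypothetical): training now keys
    off SINK_COUNT rather than off the detection reason.

    #include <stdbool.h>
    #include <stdio.h>

    /* verify_link_cap now runs on any DP detection over I2C-over-AUX,
     * including short-pulse (HPD RX) events, as long as the dongle
     * reports a sink; an empty dongle is never trained. */
    static bool should_verify_link_cap(bool is_dp, bool i2c_over_aux,
                                       int sink_count)
    {
            if (!is_dp || !i2c_over_aux)
                    return false;
            return sink_count > 0;
    }

    int main(void)
    {
            printf("empty dongle: %d\n", should_verify_link_cap(true, true, 0));
            printf("sink arrives: %d\n", should_verify_link_cap(true, true, 1));
            return 0;
    }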

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a50768a7ba68..cc2e05003595 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -969,8 +969,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
 
 		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
-			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
-			reason != DETECT_REASON_HPDRX) {
+			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
 			/*
 			 * TODO debug why Dell 2413 doesn't like
 			 *  two link trainings
commit 276cc92945ab4af02d8bedce18b4b6f87b9c1609
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Wed Jan 15 12:56:37 2020 -0500

    drm/amdgpu: original raven doesn't support full asic reset
    
    So don't use it.
    
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index af41ee4c9639..15f3424a1ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -537,6 +537,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
+	/* original raven doesn't have full asic reset */
+	if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+		return 0;
+
 	switch (soc15_asic_reset_method(adev)) {
 		case AMD_RESET_METHOD_BACO:
 			if (!adev->in_suspend)
commit 7af2a5771e0918cdadb1614c1f81dd67a58e00aa
Author: Alex Deucher <alexander.deucher at amd.com>
Date:   Wed Jan 15 12:26:51 2020 -0500

    drm/amdgpu: attempt to enable gfxoff on more raven1 boards (v2)
    
    Switch to a blacklist so we can disable specific boards
    that are problematic.
    
    v2: make the blacklist non-raven specific.
    
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
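
    For reference, blacklisting a further board would only mean adding
    a row before the zero sentinel; in the standalone sketch below the
    second entry is a placeholder, not a real quirk.

    #include <stdio.h>

    struct amdgpu_gfxoff_quirk_sketch {
            unsigned short chip_vendor, chip_device;
            unsigned short subsys_vendor, subsys_device;
            unsigned char revision;
    };

    static const struct amdgpu_gfxoff_quirk_sketch quirk_list[] = {
            /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
            { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
            /* placeholder: chip vendor/device, subsystem vendor/device,
             * PCI revision of the board being blacklisted */
            { 0x1002, 0x15d8, 0x1043, 0x0123, 0xc1 },
            { 0, 0, 0, 0, 0 },      /* zero sentinel terminates the scan */
    };

    int main(void)
    {
            int n = 0;

            while (quirk_list[n].chip_device != 0)
                    n++;
            printf("%d quirk entries\n", n);
            return 0;
    }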

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6558d70fdc58..51ec8a70660a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1162,18 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	}
 }
 
+struct amdgpu_gfxoff_quirk {
+	u16 chip_vendor;
+	u16 chip_device;
+	u16 subsys_vendor;
+	u16 subsys_device;
+	u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+	{ 0, 0, 0, 0, 0 },
+};
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+	while (p && p->chip_device != 0) {
+		if (pdev->vendor == p->chip_vendor &&
+		    pdev->device == p->chip_device &&
+		    pdev->subsystem_vendor == p->subsys_vendor &&
+		    pdev->subsystem_device == p->subsys_device &&
+		    pdev->revision == p->revision) {
+			return true;
+		}
+		++p;
+	}
+	return false;
+}
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
+	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 ||
-		      adev->pdev->device == 0x15d8) &&
-		    (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
-		     !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+		    ((adev->gfx.rlc_fw_version != 106 &&
+		      adev->gfx.rlc_fw_version < 531) ||
+		     (adev->gfx.rlc_fw_version == 53815) ||
+		     (adev->gfx.rlc_feature_version < 1) ||
+		     !adev->gfx.rlc.is_rlc_v2_1))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
commit b20dcd72c150d82c56682606202f3f028403e581
Author: Colin Ian King <colin.king at canonical.com>
Date:   Thu Jan 23 00:22:16 2020 +0000

    drm/amd/amdgpu: fix spelling mistake "to" -> "too"
    
    There is a spelling mistake in a DRM_ERROR message. Fix it.
    
    Signed-off-by: Colin Ian King <colin.king at canonical.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ceb0dbf685f1..59ddba137946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -652,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 
 	if ((addr + (uint64_t)size) >
 	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-		DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
 			  addr, lo, hi);
 		return -EINVAL;
 	}
commit a16afcdd8c58285e83951ce318b55eafb7948997
Author: zhengbin <zhengbin13 at huawei.com>
Date:   Wed Jan 22 15:53:11 2020 +0800

    drm/amd/powerplay: use true, false for bool variable in smu7_hwmgr.c
    
    Fixes coccicheck warning:
    
    drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:723:2-50: WARNING: Assignment of 0/1 to bool variable
    drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:733:3-52: WARNING: Assignment of 0/1 to bool variable
    drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:747:3-51: WARNING: Assignment of 0/1 to bool variable
    
    Reported-by: Hulk Robot <hulkci at huawei.com>
    Signed-off-by: zhengbin <zhengbin13 at huawei.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d70abada66bf..bf04cfefb283 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -720,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
 		/* param1 is for corresponding std voltage */
-		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
 	}
 
 	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
@@ -730,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 		/* Initialize Vddci DPM table based on allow Mclk values */
 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
-			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
 		}
 		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
 	}
@@ -744,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 		 */
 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
-			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
 		}
 		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
 	}
commit 2639f453f28e71dc4149fb06c71bcf6f93eb468f
Author: Nirmoy Das <nirmoy.das at amd.com>
Date:   Wed Jan 22 10:37:56 2020 +0100

    drm/amdgpu: fix doc by clarifying sched_list definition
    
    Expand the sched_list definition for better understanding.
    Also fix a typo: atleast -> at least.
    
    Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index ec79e8e5ad3c..63bccd201b97 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -45,7 +45,7 @@
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
- * Note: the sched_list should have atleast one element to schedule
+ * Note: the sched_list should have at least one element to schedule
  *       the entity
  *
  * Returns 0 on success or a negative error code on failure.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 9e71be129c30..589be851f8a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,8 +52,9 @@ enum drm_sched_priority {
  * @list: used to append this struct to the list of entities in the
  *        runqueue.
  * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: a list of drm_gpu_schedulers on which jobs from this entity can
- *              be scheduled
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ *              Jobs from this entity can be scheduled on any scheduler
+ *              on this list.
  * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
commit f583cc57ba70afdd02e36b0ada3790e8bf4fb10f
Author: xinhui pan <xinhui.pan at amd.com>
Date:   Wed Jan 22 11:03:30 2020 +0800

    drm/amdgpu: initialize bo_va_list when add gws to process
    
    bo_va_list is a list_head, so initialize it.
    
    Signed-off-by: xinhui pan <xinhui.pan at amd.com>
    Reviewed-by: Feifei Xu <Feifei.Xu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
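
    A userspace sketch of why the init matters, built on a minimal
    mirror of the kernel's struct list_head: list_add() chases
    head->next, so an uninitialized head means a wild-pointer write.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *entry, struct list_head *head)
    {
            /* If head was never initialized, head->next is garbage and
             * the head->next->prev store corrupts random memory. */
            entry->next = head->next;
            entry->prev = head;
            head->next->prev = entry;
            head->next = entry;
    }

    int main(void)
    {
            struct list_head head, node;

            INIT_LIST_HEAD(&head);  /* the line the patch adds, in spirit */
            list_add(&node, &head);
            printf("linked: %d\n", head.next == &node && head.prev == &node);
            return 0;
    }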

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b2487f4f271b..fa8ac9d19a7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 		return -ENOMEM;
 
 	mutex_init(&(*mem)->lock);
+	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
 	(*mem)->process_info = process_info;
commit 55bbb747ecfa39806198419e96e7bc2c44221d4e
Author: James Zhu <James.Zhu at amd.com>
Date:   Tue Jan 21 16:33:21 2020 -0500

    drm/amdgpu/vcn: use inst_idx replacing inst
    
    Use inst_idx, replacing inst, in the SOC15_DPG_MODE macros to avoid confusion.
    
    Signed-off-by: James Zhu <James.Zhu at amd.com>
    Reviewed-by: Leo Liu <leo.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 56984ff09ea9..d6deb0eb1e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -65,33 +65,33 @@
 /* 1 second timeout */
 #define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) 				\
-	({	WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); 			\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, 				\
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) 			\
+	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); 			\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, 				\
 			UVD_DPG_LMA_CTL__MASK_EN_MASK | 				\
-			((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) 	\
+			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) 	\
 			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | 			\
 			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); 		\
-		RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); 				\
+		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); 			\
 	})
 
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) 			\
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) 		\
 	do { 										\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); 			\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); 			\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, 				\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); 			\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); 			\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, 				\
 			UVD_DPG_LMA_CTL__READ_WRITE_MASK | 				\
-			((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) 	\
+			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) 	\
 			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | 			\
 			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); 		\
 	} while (0)
 
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) 						\
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) 						\
 	({											\
 		uint32_t internal_reg_offset, addr;						\
 		bool video_range, aon_range;							\
 												\
-		addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg);		\
+		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);		\
 		addr <<= 2; 									\
 		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && 		\
 				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600)))));	\
commit a4555732142e96855c854fa7b2a65035044391b3
Author: James Zhu <James.Zhu at amd.com>
Date:   Tue Jan 21 16:28:07 2020 -0500

    drm/amdgpu/vcn: fix typo error
    
    Fix a typo: it should be inst_idx instead of inst.
    
    Signed-off-by: James Zhu <James.Zhu at amd.com>
    Reviewed-by: Leo Liu <leo.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
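
    A standalone illustration of the failure mode (register array and
    macro names are hypothetical): because macros are textual, a body
    that says inst while the parameter is inst_idx silently captures
    whatever inst happens to exist in the caller's scope -- it compiles
    cleanly but touches the wrong instance.

    #include <stdio.h>

    static int regs[2] = { 100, 200 };

    /* Buggy: the body references 'inst', not the declared parameter. */
    #define READ_REG_BUGGY(inst_idx)        (regs[inst])
    /* Fixed: the body uses the parameter it declares. */
    #define READ_REG_FIXED(inst_idx)        (regs[inst_idx])

    int main(void)
    {
            int inst = 0;   /* unrelated variable in the caller */

            printf("buggy: %d\n", READ_REG_BUGGY(1));       /* prints 100 */
            printf("fixed: %d\n", READ_REG_FIXED(1));       /* prints 200 */
            return 0;
    }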

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c4984c5fb2db..56984ff09ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -111,7 +111,7 @@
 
 #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) 					\
 	({											\
-		WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL, 					\
+		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, 					\
 			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |				\
 			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |				\
 			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));			\
commit 326b523eeb521368fd1566c8ceeddf04a68cb01e
Author: James Zhu <James.Zhu at amd.com>
Date:   Mon Jan 20 21:44:07 2020 -0500

    drm/amdgpu/vcn: fix vcn2.5 instance issue
    
    Fix a vcn2.5 instance issue: vcn0 and vcn1 have the same register
    offsets, so DPG register offsets must be computed from instance 0.
    
    Signed-off-by: James Zhu <James.Zhu at amd.com>
    Reviewed-by: Leo Liu <leo.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index bb31258e3340..70fae7977f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -435,88 +435,88 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		if (!indirect) {
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
 		} else {
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
 		}
 		offset = 0;
 	} else {
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
 		offset = size;
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
+			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
 	}
 
 	if (!indirect)
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
 	else
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
 
 	/* cache window 1: stack */
 	if (!indirect) {
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	} else {
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	}
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
 
 	/* cache window 2: context */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
 
 	/* non-cache window */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
 
 	/* VCN global tiling registers */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+		UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 /**
@@ -670,19 +670,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
 		 UVD_CGC_CTRL__VCPU_MODE_MASK |
 		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
 
 	/* turn off clock gating */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
 
 	/* turn on SUVD clock gating */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
 
 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
 }
 
 /**
@@ -772,11 +772,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
 	/* disable master interupt */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);
+		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
 
 	/* setup mmUVD_LMI_CTRL */
 	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -788,28 +788,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 		0x00100000L);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);
+		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
 
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_MPC_CNTL),
+		UVD, 0, mmUVD_MPC_CNTL),
 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
 
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_MPC_SET_MUXA0),
+		UVD, 0, mmUVD_MPC_SET_MUXA0),
 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
 
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_MPC_SET_MUXB0),
+		UVD, 0, mmUVD_MPC_SET_MUXB0),
 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
 
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_MPC_SET_MUX),
+		UVD, 0, mmUVD_MPC_SET_MUX),
 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -817,26 +817,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
 
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
 
 	/* enable LMI MC and UMC channels */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);
+		UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
 
 	/* unblock VCPU register access */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+		UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
 
 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
 	/* enable master interrupt */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, inst_idx, mmUVD_MASTINT_EN),
+		UVD, 0, mmUVD_MASTINT_EN),
 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
 	if (indirect)
commit 62884a7bf34379b2cacd391c510cb9d852b36560
Author: James Zhu <James.Zhu at amd.com>
Date:   Mon Jan 20 15:47:35 2020 -0500

    drm/amdgpu/vcn2.5: fix a bug for the 2nd vcn instance (v2)
    
    Fix a bug for the 2nd vcn instance at start and stop: an early return
    from the per-instance loop skipped every instance after the first.
    
    v2: squash in unused label removal.
    
    Signed-off-by: James Zhu <James.Zhu at amd.com>
    Reviewed-by: Leo Liu <leo.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
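
    The bug class, sketched standalone with hypothetical names: a
    return inside a per-instance loop ends the whole function after
    instance 0, whereas continue moves on to the next instance.

    #include <stdio.h>

    static int start_one(int i) { printf("started instance %d\n", i); return 0; }

    static int start_all_buggy(int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    return start_one(i);    /* instances 1..n-1 never start */
            return 0;
    }

    static int start_all_fixed(int n)
    {
            int i, r = 0;

            for (i = 0; i < n; i++) {
                    r = start_one(i);
                    continue;               /* on to the next instance */
            }
            return r;
    }

    int main(void)
    {
            puts("buggy:");
            start_all_buggy(2);
            puts("fixed:");
            start_all_fixed(2);
            return 0;
    }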

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index d8455b62567d..bb31258e3340 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -891,8 +891,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-			return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+			continue;
+		}
 
 		/* disable register anti-hang mechanism */
 		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
@@ -903,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
 	}
 
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+		return 0;
+
 	/*SW clock gating */
 	vcn_v2_5_disable_clock_gating(adev);
 
@@ -1294,10 +1299,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-
 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 			r = vcn_v2_5_stop_dpg_mode(adev, i);
-			goto power_off;
+			continue;
 		}
 
 		/* wait for vcn idle */
@@ -1349,7 +1353,6 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	}
 
-power_off:
 	if (adev->pm.dpm_enabled)
 		amdgpu_dpm_enable_uvd(adev, false);
 
commit b6501217265f770957548eec818f6c08aa1ef6a1
Author: James Zhu <James.Zhu at amd.com>
Date:   Mon Jan 20 15:43:04 2020 -0500

    drm/amdgpu/vcn: Share vcn_v2_0_dec_ring_test_ring to vcn2.5
    
    Share vcn_v2_0_dec_ring_test_ring with vcn2.5 to support the
    vcn software ring.
    
    Signed-off-by: James Zhu <James.Zhu at amd.com>
    Reviewed-by: Leo Liu <leo.liu at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b8dc136d2a01..4f7216788f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1624,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t tmp = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index ef749b02ded9..6c9de1882428 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				unsigned vmid, uint64_t pd_addr);
 extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
 				uint32_t reg, uint32_t val);
+extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring);
 
 extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);
 extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 6970d3a1ae6f..d8455b62567d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1488,7 +1488,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_dec_ring_test_ring,
+	.test_ring = vcn_v2_0_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
 	.insert_start = vcn_v2_0_dec_ring_insert_start,
commit fa34edbed46178545285fd080c8d4d3fbc8fbc71
Author: Felix Kuehling <Felix.Kuehling at amd.com>
Date:   Fri Jan 17 20:08:42 2020 -0500

    drm/amdgpu: Use the correct flush_type in flush_gpu_tlb_pasid
    
    The flush_type was incorrectly hard-coded to 0 when falling back
    to MMIO-based invalidation in flush_gpu_tlb_pasid.
    
    Fixes: ea930000a6dc ("drm/amdgpu: export function to flush TLB via pasid")
    Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Reviewed-by: Oak Zeng <Oak.Zeng at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
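
    The bug shape in miniature (hypothetical stand-ins for the real
    functions): a fallback path that hard-codes an argument instead of
    forwarding the parameter it was handed.

    #include <stdio.h>

    static void flush_gpu_tlb(int vmid, int hub, int flush_type)
    {
            printf("flush vmid %d hub %d type %d\n", vmid, hub, flush_type);
    }

    static void flush_gpu_tlb_pasid(int vmid, int flush_type)
    {
            /* was: flush_gpu_tlb(vmid, 0, 0);  -- dropped flush_type */
            flush_gpu_tlb(vmid, 0, flush_type);
    }

    int main(void) { flush_gpu_tlb_pasid(3, 2); return 0; }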

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 67318b75f5b8..9775eca6fe43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -443,10 +443,10 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 			if (all_hub) {
 				for (i = 0; i < adev->num_vmhubs; i++)
 					gmc_v10_0_flush_gpu_tlb(adev, vmid,
-							i, 0);
+							i, flush_type);
 			} else {
 				gmc_v10_0_flush_gpu_tlb(adev, vmid,
-						AMDGPU_GFXHUB_0, 0);
+						AMDGPU_GFXHUB_0, flush_type);
 			}
 			break;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6d95de1413c4..90216abf14a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -602,10 +602,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 			if (all_hub) {
 				for (i = 0; i < adev->num_vmhubs; i++)
 					gmc_v9_0_flush_gpu_tlb(adev, vmid,
-							i, 0);
+							i, flush_type);
 			} else {
 				gmc_v9_0_flush_gpu_tlb(adev, vmid,
-						AMDGPU_GFXHUB_0, 0);
+						AMDGPU_GFXHUB_0, flush_type);
 			}
 			break;
 		}
commit 37c58ddf57364d1a636850bb8ba6acbe1e16195e
Author: Felix Kuehling <Felix.Kuehling at amd.com>
Date:   Fri Jan 17 19:54:45 2020 -0500

    drm/amdgpu: Fix TLB invalidation request when using semaphore
    
    Use a more meaningful variable name for the invalidation request
    that is distinct from the tmp variable that gets overwritten when
    acquiring the invalidation semaphore.
    
    Fixes: 4ed8a03740d0 ("drm/amdgpu: invalidate mmhub semaphore workaround in gmc9/gmc10")
    Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Reviewed-by: Yong Zhao <Yong.Zhao at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
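
    The hazard in miniature (values and helper names hypothetical): the
    request word was computed into tmp, but the semaphore-acquire poll
    loop reused tmp, so the value finally written to the invalidate
    request register was the last poll result, not the request.

    #include <stdio.h>

    static unsigned poll_semaphore(void)       { return 0x1; }
    static unsigned build_invalidate_req(void) { return 0xabcd; }

    int main(void)
    {
            unsigned tmp, inv_req;

            tmp = build_invalidate_req();
            tmp = poll_semaphore();         /* clobbers the request word */
            printf("written (buggy): 0x%x\n", tmp);

            inv_req = build_invalidate_req();
            tmp = poll_semaphore();         /* scratch stays scratch */
            printf("written (fixed): 0x%x\n", inv_req);
            return 0;
    }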

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 0c5bf3bd640f..67318b75f5b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -262,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-	u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+	u32 tmp;
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned int i;
@@ -289,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 	}
 
-	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
 
 	/*
 	 * Issue a dummy read to wait for the ACK register to be cleared
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 54bdc1786ab1..6d95de1413c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
 	const unsigned eng = 17;
-	u32 j, tmp;
+	u32 j, inv_req, tmp;
 	struct amdgpu_vmhub *hub;
 
 	BUG_ON(vmhub >= adev->num_vmhubs);
 
 	hub = &adev->vmhub[vmhub];
-	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
 	/* This is necessary for a HW workaround under SRIOV as well
 	 * as GFXOFF under bare metal
@@ -493,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		uint32_t req = hub->vm_inv_eng0_req + eng;
 		uint32_t ack = hub->vm_inv_eng0_ack + eng;
 
-		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 				1 << vmid);
 		return;
 	}
@@ -521,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 	}
 
-	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
 
 	/*
 	 * Issue a dummy read to wait for the ACK register to be cleared
commit d478667253f988dbd6215734a11e35f6953b540d
Author: Wenjing Liu <Wenjing.Liu at amd.com>
Date:   Mon Jan 13 17:05:42 2020 -0500

    drm/amd/display: use odm combine for YCbCr420 timing with h_active greater than 4096
    
    [why]
    FMT has a limitation: it cannot support YCbCr420 with h_active greater
    than 4096.
    
    [how]
    Use odm combine to overcome the limitation.
    
    Signed-off-by: Wenjing Liu <Wenjing.Liu at amd.com>
    Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
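
    The new decision order condenses to a cascade like the sketch below
    (helper name hypothetical; the width limits are the ones the patch
    defines): ODM combine is forced when the single-pipe DISPCLK
    exceeds the rounded-down maximum, when DSC is enabled past its
    width limit, or when a YCbCr420 timing is wider than the 4096-pixel
    FMT limit.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DSC_IMAGE_WIDTH     5184
    #define MAX_420_IMAGE_WIDTH     4096

    static bool need_odm_combine(double dispclk_no_odm, double max_dispclk,
                                 bool dsc_enabled, int h_active, bool is_420)
    {
            if (dispclk_no_odm > max_dispclk)
                    return true;
            if (dsc_enabled && h_active > MAX_DSC_IMAGE_WIDTH)
                    return true;
            if (is_420 && h_active > MAX_420_IMAGE_WIDTH)
                    return true;
            return false;
    }

    int main(void)
    {
            /* 4096-wide 420 still fits; 5120-wide 420 needs ODM combine. */
            printf("%d\n", need_odm_combine(400.0, 1200.0, false, 4096, true));
            printf("%d\n", need_odm_combine(400.0, 1200.0, false, 5120, true));
            return 0;
    }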

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index e7a8ac7a1f22..45f028986a8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -38,6 +38,7 @@
 
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
 
 static double adjust_ReturnBW(
 		struct display_mode_lib *mode_lib,
@@ -3894,13 +3895,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						&& i == mode_lib->vba.soc.num_states)
 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-				if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
-					locals->ODMCombineEnablePerState[i][k] = false;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
-				} else {
-					locals->ODMCombineEnablePerState[i][k] = true;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+				locals->ODMCombineEnablePerState[i][k] = false;
+				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+				if (mode_lib->vba.ODMCapability) {
+					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					}
 				}
+
 				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
 						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
 						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 22f3b5a4b3b9..485a9c62ec58 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -39,6 +39,7 @@
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
 #define DCN20_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
 
 static double adjust_ReturnBW(
 		struct display_mode_lib *mode_lib,
@@ -3935,15 +3936,22 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						&& i == mode_lib->vba.soc.num_states)
 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-				if (mode_lib->vba.ODMCapability == false ||
-						(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
-							&& (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
-					locals->ODMCombineEnablePerState[i][k] = false;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
-				} else {
-					locals->ODMCombineEnablePerState[i][k] = true;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+				locals->ODMCombineEnablePerState[i][k] = false;
+				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+				if (mode_lib->vba.ODMCapability) {
+					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					}
 				}
+
 				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
 						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
 						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index af35b3bea909..e6617c958bb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -65,6 +65,7 @@ typedef struct {
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
 #define DCN21_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN21_MAX_420_IMAGE_WIDTH 4096
 
 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
 static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3971,15 +3972,22 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						&& i == mode_lib->vba.soc.num_states)
 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-				if (mode_lib->vba.ODMCapability == false ||
-						(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
-							&& (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
-					locals->ODMCombineEnablePerState[i][k] = false;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
-				} else {
-					locals->ODMCombineEnablePerState[i][k] = true;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+				locals->ODMCombineEnablePerState[i][k] = false;
+				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+				if (mode_lib->vba.ODMCapability) {
+					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					}
 				}
+
 				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
 						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
 						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
commit 09f6dd6e6d4305cf199443bb7d099f4f64dc374b
Author: Lewis Huang <Lewis.Huang at amd.com>
Date:   Tue Jan 14 11:08:34 2020 +0800

    drm/amd/display: init hw i2c speed
    
    [Why]
    The driver didn't initialize the HW i2c speed, so the HDCP HW
    could not send commands: the default value of the speed register
    is 0x2.
    
    [How]
    Restore the default speed when releasing the i2c engine.
    
    Signed-off-by: Lewis Huang <Lewis.Huang at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 49d490214060..066188ba7949 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -339,7 +339,7 @@ static void release_engine(
 	bool safe_to_reset;
 
 	/* Restore original HW engine speed */
-	REG_WRITE(SPEED, dce_i2c_hw->original_speed);
+	set_speed(dce_i2c_hw, dce_i2c_hw->default_speed);
 
 	/* Reset HW engine */
 	{
@@ -371,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
 {
 	uint32_t counter = 0;
 	enum gpio_result result;
-	uint32_t current_speed;
 	struct dce_i2c_hw *dce_i2c_hw = NULL;
 
 	if (!ddc)
@@ -409,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
 
 	dce_i2c_hw->ddc = ddc;
 
-	current_speed = REG_READ(SPEED);
-
-	if (current_speed)
-		dce_i2c_hw->original_speed = current_speed;
-
 	if (!setup_engine(dce_i2c_hw)) {
 		release_engine(dce_i2c_hw);
 		return NULL;
@@ -613,7 +607,6 @@ void dce_i2c_hw_construct(
 	dce_i2c_hw->buffer_used_bytes = 0;
 	dce_i2c_hw->transaction_count = 0;
 	dce_i2c_hw->engine_keep_power_up_count = 1;
-	dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED;
 	dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED;
 	dce_i2c_hw->send_reset_length = 0;
 	dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index d4b2037f7d74..fb055e6883c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -256,7 +256,6 @@ struct i2c_request_transaction_data {
 
 struct dce_i2c_hw {
 	struct ddc *ddc;
-	uint32_t original_speed;
 	uint32_t engine_keep_power_up_count;
 	uint32_t transaction_count;
 	uint32_t buffer_used_bytes;
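
The net effect: instead of writing back a speed value sampled at acquire time (which could be the bogus power-on default of 0x2), release now always reprograms the prescale for the known default speed. A condensed sketch of the release path after this patch (simplified from the function above):

    static void release_engine(struct dce_i2c_hw *dce_i2c_hw)
    {
        /* Recompute and program the prescale for the default speed
         * (typically 100 kHz) rather than trusting a register
         * snapshot taken at acquire time.
         */
        set_speed(dce_i2c_hw, dce_i2c_hw->default_speed);

        /* ... reset HW engine and drop the power-up reference ... */
    }
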
commit 947daab2a2d1813a8d973c59551545e44bdf1549
Author: Isabel Zhang <isabel.zhang at amd.com>
Date:   Thu Jan 9 12:05:21 2020 -0500

    drm/amd/display: changed max_downscale_src_width to 4096.
    
    [Why]
    MPO isn't enabled on some 4K videos because the video source width
    is 4096 while the current limit is 3840.
    
    [How]
    Changed the limit to 4096.
    
    Signed-off-by: Isabel Zhang <isabel.zhang at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 1d741bca2211..0d506d30d6b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -830,7 +830,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.disable_dcc = DCC_ENABLE,
 		.vsr_support = true,
 		.performance_trace = false,
-		.max_downscale_src_width = 3840,
+		.max_downscale_src_width = 4096,
 		.disable_pplib_wm_range = false,
 		.scl_reset_length10 = true,
 		.sanity_checks = true,
commit 23bc5f3404de2e8b3adaedf33507409fda6f5528
Author: Wenjing Liu <Wenjing.Liu at amd.com>
Date:   Wed Jan 8 16:59:23 2020 -0500

    drm/amd/display: support VSC SDP update on video test pattern request
    
    [why]
    MSA will be deprecated in the future.
    Need to support VSC during DP test automation.
    
    [how]
    Do not disable VSC during DP test automation.
    TODO - need to add VSC update on DM side on test request.
    
    Signed-off-by: Wenjing Liu <Wenjing.Liu at amd.com>
    Reviewed-by: Nikola Cornij <Nikola.Cornij at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 3bb1b481451b..a53e8fed56f3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3926,7 +3926,6 @@ bool dc_link_dp_set_test_pattern(
 		}
 	} else {
 		enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
-		struct encoder_info_frame info_frame = pipe_ctx->stream_res.encoder_info_frame;
 
 		switch (test_pattern_color_space) {
 		case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3951,13 +3950,9 @@ bool dc_link_dp_set_test_pattern(
 		/* update MSA to requested color space */
 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
 				&pipe_ctx->stream->timing,
-				color_space, false, link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
-
-		/* disable vsc so no need to update it based on request */
-		info_frame.vsc.valid = false;
-		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
-				pipe_ctx->stream_res.stream_enc,
-				&info_frame);
+				color_space,
+				pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
 
 		/* CRTC Patterns */
 		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
commit e5f0b5214beea71bd25584da451897300742795e
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Mon Jan 13 09:00:46 2020 -0500

    drm/amd/display: Check hw_init state when determining if DMCUB is initialized
    
    [Why]
    CW2 may already be programmed when coming back from S4. In this case
    we want to unconditionally replace whatever DMCUB version is currently
    enabled with the latest.
    
    [How]
    Check the hw_init flag to know whether or not we've previously
    executed the initialization routine.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ea99d4cf34cc..85a518bf8a76 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -312,6 +312,9 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
 	if (!dmub->sw_init)
 		return DMUB_STATUS_INVALID;
 
+	if (!dmub->hw_init)
+		return DMUB_STATUS_OK;
+
 	if (dmub->hw_funcs.is_hw_init)
 		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
 
commit a7da96fbccc682a67bcc15f3461cdd60a622ffaf
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Mon Jan 13 08:58:00 2020 -0500

    drm/amd/display: Reset inbox rptr/wptr when resetting DMCUB
    
    [Why]
    It makes more logical sense to set these after the DMCUB has been
    reset rather than when we set up the inbox.
    
    [How]
    Move them into the reset callback.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 9229012b93e2..b2ca8e0dbac9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -80,6 +80,8 @@ void dmub_dcn20_reset(struct dmub_srv *dmub)
 	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
 	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
 }
 
 void dmub_dcn20_reset_release(struct dmub_srv *dmub)
@@ -190,8 +192,6 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
 
 	REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
 	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
-	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
-	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
 }
 
 uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
commit cc934031dc2a0be9535477dbef4b0e94c9f06ecd
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Sat Jan 11 13:43:39 2020 -0500

    drm/amd/display: Call ATOM_INIT instead of ATOM_ENABLE for DMCUB
    
    [Why]
    The DMCUB command table doesn't support ATOM_ENABLE/ATOM_DISABLE
    anymore, so we never end up calling the DCN init path in DMCUB.
    
    [How]
    Map ATOM_ENABLE to ATOM_INIT only for DMCUB command table offloading.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index c4ba6e84db65..629a07a2719b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -711,6 +711,10 @@ static void enable_disp_power_gating_dmcub(
 	power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
 	power_gating.power_gating.pwr = *pwr;
 
+	/* ATOM_ENABLE is old API in DMUB */
+	if (power_gating.power_gating.pwr.enable == ATOM_ENABLE)
+		power_gating.power_gating.pwr.enable = ATOM_INIT;
+
 	dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
 	dc_dmub_srv_cmd_execute(dmcub);
 	dc_dmub_srv_wait_idle(dmcub);
commit 0167da498d57dddf31eb30516d1fae0f31ddecb6
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Thu Jan 9 14:41:22 2020 -0500

    drm/amd/display: Add hardware reset interface for DMUB service
    
    [Why]
    We'll need this to perform a clean shutdown before unloading the driver.
    
    [How]
    It will call reset internally and set hw_init to false. It won't do
    anything if the hardware isn't initialized.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index 287fb9a36a64..f8917594036a 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -418,6 +418,21 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
 enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 				  const struct dmub_srv_hw_params *params);
 
+/**
+ * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized
+ * @dmub: the dmub service
+ *
+ * Before destroying the DMUB service or releasing the backing framebuffer
+ * memory we'll need to put the DMCUB into reset first.
+ *
+ * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+
 /**
  * dmub_srv_cmd_queue() - queues a command to the DMUB
  * @dmub: the dmub service
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index dee676335d73..ea99d4cf34cc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -415,6 +415,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 	return DMUB_STATUS_OK;
 }
 
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+{
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_init)
+		return DMUB_STATUS_OK;
+
+	if (dmub->hw_funcs.reset)
+		dmub->hw_funcs.reset(dmub);
+
+	dmub->hw_init = false;
+
+	return DMUB_STATUS_OK;
+}
+
 enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
 				    const struct dmub_cmd_header *cmd)
 {
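
A sketch of the intended teardown ordering (the driver-side call site is illustrative, not part of this patch):

    /* Put the DMCUB into reset before releasing the framebuffer
     * memory backing its windows; harmless if the hardware was never
     * initialized, and dmub_srv_hw_init() can bring it back up later.
     */
    dmub_srv_hw_reset(dmub);
    /* ... now safe to free the backing framebuffer / destroy the service ... */
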
commit 7527791e1fbd595a294cf6e6f41999d8acf6c43f
Author: Roman Li <roman.li at amd.com>
Date:   Mon Jan 13 10:26:19 2020 -0500

    drm/amd/display: Fix update type for multiple planes
    
    [Why]
    determine_update_type_for_commit() uses pointers to a single instance
    of a local variable to fill scaling/color info for all plane updates.
    This is a bug that leads to an incorrect update type for the commit
    when there are multiple planes per CRTC.
    Each plane should refer to separate scaling/color data.
    
    [How]
    Use arrays for plane properties.
    Bundle all properties into a single structure to simplify memory allocation.
    
    Signed-off-by: Roman Li <roman.li at amd.com>
    Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3af014fcdcc1..2ac349849081 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7759,24 +7759,27 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
 	struct dc_stream_status *status = NULL;
-
-	struct dc_surface_update *updates;
 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
+	struct surface_info_bundle {
+		struct dc_surface_update surface_updates[MAX_SURFACES];
+		struct dc_plane_info plane_infos[MAX_SURFACES];
+		struct dc_scaling_info scaling_infos[MAX_SURFACES];
+		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+		struct dc_stream_update stream_update;
+	} *bundle;
 
-	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
+	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 
-	if (!updates) {
-		DRM_ERROR("Failed to allocate plane updates\n");
+	if (!bundle) {
+		DRM_ERROR("Failed to allocate update bundle\n");
 		/* Set type to FULL to avoid crashing in DC*/
 		update_type = UPDATE_TYPE_FULL;
 		goto cleanup;
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct dc_scaling_info scaling_info;
-		struct dc_stream_update stream_update;
 
-		memset(&stream_update, 0, sizeof(stream_update));
+		memset(bundle, 0, sizeof(struct surface_info_bundle));
 
 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -7793,8 +7796,9 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
 			const struct amdgpu_framebuffer *amdgpu_fb =
 				to_amdgpu_framebuffer(new_plane_state->fb);
-			struct dc_plane_info plane_info;
-			struct dc_flip_addrs flip_addr;
+			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
+			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
+			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
 			uint64_t tiling_flags;
 
 			new_plane_crtc = new_plane_state->crtc;
@@ -7812,49 +7816,48 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 			if (crtc != new_plane_crtc)
 				continue;
 
-			updates[num_plane].surface = new_dm_plane_state->dc_state;
+			bundle->surface_updates[num_plane].surface =
+					new_dm_plane_state->dc_state;
 
 			if (new_crtc_state->mode_changed) {
-				stream_update.dst = new_dm_crtc_state->stream->dst;
-				stream_update.src = new_dm_crtc_state->stream->src;
+				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
+				bundle->stream_update.src = new_dm_crtc_state->stream->src;
 			}
 
 			if (new_crtc_state->color_mgmt_changed) {
-				updates[num_plane].gamma =
+				bundle->surface_updates[num_plane].gamma =
 						new_dm_plane_state->dc_state->gamma_correction;
-				updates[num_plane].in_transfer_func =
+				bundle->surface_updates[num_plane].in_transfer_func =
 						new_dm_plane_state->dc_state->in_transfer_func;
-				stream_update.gamut_remap =
+				bundle->stream_update.gamut_remap =
 						&new_dm_crtc_state->stream->gamut_remap_matrix;
-				stream_update.output_csc_transform =
+				bundle->stream_update.output_csc_transform =
 						&new_dm_crtc_state->stream->csc_color_matrix;
-				stream_update.out_transfer_func =
+				bundle->stream_update.out_transfer_func =
 						new_dm_crtc_state->stream->out_transfer_func;
 			}
 
 			ret = fill_dc_scaling_info(new_plane_state,
-						   &scaling_info);
+						   scaling_info);
 			if (ret)
 				goto cleanup;
 
-			updates[num_plane].scaling_info = &scaling_info;
+			bundle->surface_updates[num_plane].scaling_info = scaling_info;
 
 			if (amdgpu_fb) {
 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
 				if (ret)
 					goto cleanup;
 
-				memset(&flip_addr, 0, sizeof(flip_addr));
-
 				ret = fill_dc_plane_info_and_addr(
 					dm->adev, new_plane_state, tiling_flags,
-					&plane_info,
-					&flip_addr.address);
+					plane_info,
+					&flip_addr->address);
 				if (ret)
 					goto cleanup;
 
-				updates[num_plane].plane_info = &plane_info;
-				updates[num_plane].flip_addr = &flip_addr;
+				bundle->surface_updates[num_plane].plane_info = plane_info;
+				bundle->surface_updates[num_plane].flip_addr = flip_addr;
 			}
 
 			num_plane++;
@@ -7875,14 +7878,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 
 		status = dc_stream_get_status_from_state(old_dm_state->context,
 							 new_dm_crtc_state->stream);
-		stream_update.stream = new_dm_crtc_state->stream;
+		bundle->stream_update.stream = new_dm_crtc_state->stream;
 		/*
 		 * TODO: DC modifies the surface during this call so we need
 		 * to lock here - find a way to do this without locking.
 		 */
 		mutex_lock(&dm->dc_lock);
-		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
-								  &stream_update, status);
+		update_type = dc_check_update_surfaces_for_stream(
+				dc,	bundle->surface_updates, num_plane,
+				&bundle->stream_update, status);
 		mutex_unlock(&dm->dc_lock);
 
 		if (update_type > UPDATE_TYPE_MED) {
@@ -7892,7 +7896,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 	}
 
 cleanup:
-	kfree(updates);
+	kfree(bundle);
 
 	*out_type = update_type;
 	return ret;
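
The essence of the bug, reduced to a toy example: every element of the update array pointed at the same stack locals, so each loop iteration overwrote the data of all previously filled planes.

    /* Before: one local per CRTC, shared by every plane. */
    struct dc_scaling_info scaling_info;
    for (j = 0; j < num_planes; j++)
        updates[j].scaling_info = &scaling_info;   /* all planes alias it */

    /* After: per-plane storage inside one heap-allocated bundle. */
    updates[j].scaling_info = &bundle->scaling_infos[j];
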
commit 022205ffbb8fc48bb12c7941f07c6e19bdf11155
Author: Brandon Syu <Brandon.Syu at amd.com>
Date:   Thu Jan 9 14:19:20 2020 +0800

    drm/amd/display: fix rotation_angle to use enum values
    
    [Why]
    Hardcoded numeric rotation values are not appropriate.
    
    [How]
    Use enum values instead of fixed numbers.
    
    Signed-off-by: Brandon Syu <Brandon.Syu at amd.com>
    Reviewed-by: Eric Yang <eric.yang2 at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index da63fc53cc4a..cf09b9335728 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -261,28 +261,28 @@ static void hubp21_apply_PLAT_54186_wa(
 			address->video_progressive.luma_addr.high_part == 0xf4)
 		return;
 
-	if ((rotation_angle == 0 || rotation_angle == 180)
+	if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180)
 			&& viewport_c_height <= 512)
 		return;
 
-	if ((rotation_angle == 90 || rotation_angle == 270)
+	if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270)
 				&& viewport_c_width <= 512)
 		return;
 
 	switch (rotation_angle) {
-	case 0: /* 0 degree rotation */
+	case ROTATION_ANGLE_0: /* 0 degree rotation */
 		row_height = 128;
 		patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
 		patched_viewport_width = viewport_c_width;
 		hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
 		break;
-	case 2: /* 180 degree rotation */
+	case ROTATION_ANGLE_180: /* 180 degree rotation */
 		row_height = 128;
 		patched_viewport_height = viewport_c_height + row_height;
 		patched_viewport_width = viewport_c_width;
 		hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
 		break;
-	case 1: /* 90 degree rotation */
+	case ROTATION_ANGLE_90: /* 90 degree rotation */
 		row_height = 256;
 		if (h_mirror_en) {
 			patched_viewport_height = viewport_c_height;
@@ -294,7 +294,7 @@ static void hubp21_apply_PLAT_54186_wa(
 			hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
 		}
 		break;
-	case 3:	/* 270 degree rotation */
+	case ROTATION_ANGLE_270: /* 270 degree rotation */
 		row_height = 256;
 		if (h_mirror_en) {
 			patched_viewport_height = viewport_c_height;
commit 2c61704bcaeed3b51dd48f97003533f40dbf3050
Author: Aric Cyr <aric.cyr at amd.com>
Date:   Mon Jan 13 08:40:00 2020 -0500

    drm/amd/display: 3.2.69
    
    Signed-off-by: Aric Cyr <aric.cyr at amd.com>
    Reviewed-by: Aric Cyr <Aric.Cyr at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7d31dcb9e37f..8ff25b5dd2f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.68"
+#define DC_VER "3.2.69"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
commit 9eeec26a13392d6efb5215c26b5e9965ef7ac2af
Author: Lewis Huang <Lewis.Huang at amd.com>
Date:   Sat Jan 11 00:51:15 2020 +0800

    drm/amd/display: Refine i2c frequency calculating sequence
    
    [Why]
    In HG mode, VBIOS doesn't call DispController_Init to program the
    NV1x XTAL_REF_DIV value during ASIC_INIT, but the driver reads
    XTAL_REF_DIV to calculate the i2c reference frequency. This causes
    the i2c frequency to change from 100 kHz to 200 kHz.
    
    [How]
    Remove the get_speed function and calculate the reference frequency
    in the set_speed function.
    
    Signed-off-by: Lewis Huang <Lewis.Huang at amd.com>
    Reviewed-by: Jun Lei <Jun.Lei at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 1cd4d8fc361f..49d490214060 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size(
 			dce_i2c_hw->buffer_used_bytes;
 }
 
-static uint32_t get_speed(
-	const struct dce_i2c_hw *dce_i2c_hw)
-{
-	uint32_t pre_scale = 0;
-
-	REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
-
-	/* [anaumov] it seems following is unnecessary */
-	/*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
-	return pre_scale ?
-		dce_i2c_hw->reference_frequency / pre_scale :
-		dce_i2c_hw->default_speed;
-}
-
 static void process_channel_reply(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct i2c_payload *reply)
@@ -278,16 +264,25 @@ static void set_speed(
 	struct dce_i2c_hw *dce_i2c_hw,
 	uint32_t speed)
 {
+	uint32_t xtal_ref_div = 0;
+	uint32_t prescale = 0;
+
+	REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+	if (xtal_ref_div == 0)
+		xtal_ref_div = 2;
+
+	prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
 
 	if (speed) {
 		if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
 			REG_UPDATE_N(SPEED, 3,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
 				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
 				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
 		else
 			REG_UPDATE_N(SPEED, 2,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
 				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
 	}
 }
@@ -344,9 +339,7 @@ static void release_engine(
 	bool safe_to_reset;
 
 	/* Restore original HW engine speed */
-
-	set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
-
+	REG_WRITE(SPEED, dce_i2c_hw->original_speed);
 
 	/* Reset HW engine */
 	{
@@ -416,7 +409,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
 
 	dce_i2c_hw->ddc = ddc;
 
-	current_speed = get_speed(dce_i2c_hw);
+	current_speed = REG_READ(SPEED);
 
 	if (current_speed)
 		dce_i2c_hw->original_speed = current_speed;
@@ -478,13 +471,9 @@ static void submit_channel_request_hw(
 
 static uint32_t get_transaction_timeout_hw(
 	const struct dce_i2c_hw *dce_i2c_hw,
-	uint32_t length)
+	uint32_t length,
+	uint32_t speed)
 {
-
-	uint32_t speed = get_speed(dce_i2c_hw);
-
-
-
 	uint32_t period_timeout;
 	uint32_t num_of_clock_stretches;
 
@@ -504,7 +493,8 @@ static uint32_t get_transaction_timeout_hw(
 bool dce_i2c_hw_engine_submit_payload(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct i2c_payload *payload,
-	bool middle_of_transaction)
+	bool middle_of_transaction,
+	uint32_t speed)
 {
 
 	struct i2c_request_transaction_data request;
@@ -542,7 +532,7 @@ bool dce_i2c_hw_engine_submit_payload(
 	/* obtain timeout value before submitting request */
 
 	transaction_timeout = get_transaction_timeout_hw(
-		dce_i2c_hw, payload->length + 1);
+		dce_i2c_hw, payload->length + 1, speed);
 
 	submit_channel_request_hw(
 		dce_i2c_hw, &request);
@@ -588,13 +578,11 @@ bool dce_i2c_submit_command_hw(
 		struct i2c_payload *payload = cmd->payloads + index_of_payload;
 
 		if (!dce_i2c_hw_engine_submit_payload(
-				dce_i2c_hw, payload, mot)) {
+				dce_i2c_hw, payload, mot, cmd->speed)) {
 			result = false;
 			break;
 		}
 
-
-
 		++index_of_payload;
 	}
 
@@ -640,9 +628,6 @@ void dce100_i2c_hw_construct(
 	const struct dce_i2c_shift *shifts,
 	const struct dce_i2c_mask *masks)
 {
-
-	uint32_t xtal_ref_div = 0;
-
 	dce_i2c_hw_construct(dce_i2c_hw,
 			ctx,
 			engine_id,
@@ -650,21 +635,6 @@ void dce100_i2c_hw_construct(
 			shifts,
 			masks);
 	dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100;
-
-	REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
-
-	if (xtal_ref_div == 0)
-		xtal_ref_div = 2;
-
-	/*Calculating Reference Clock by divding original frequency by
-	 * XTAL_REF_DIV.
-	 * At upper level, uint32_t reference_frequency =
-	 *  dal_dce_i2c_get_reference_clock(as) >> 1
-	 *  which already divided by 2. So we need x2 to get original
-	 *  reference clock from ppll_info
-	 */
-	dce_i2c_hw->reference_frequency =
-		(dce_i2c_hw->reference_frequency * 2) / xtal_ref_div;
 }
 
 void dce112_i2c_hw_construct(
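
As a worked example of the new prescale math (numbers illustrative only): the stored reference_frequency is already halved by the upper layer, so for a 100 MHz crystal it holds 50,000 kHz. With XTAL_REF_DIV = 2 and a requested speed of 100 kHz:

    /* prescale = ((reference_frequency * 2) / xtal_ref_div) / speed
     *          = ((50000 * 2) / 2) / 100
     *          = 500
     *
     * Reading XTAL_REF_DIV inside set_speed() (and defaulting to 2
     * when it is unprogrammed) keeps this correct even when VBIOS
     * skipped DispController_Init in HG mode.
     */
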
commit 887ff121987d1bf56ce7c126731a396d4e29a53c
Author: Anthony Koo <Anthony.Koo at amd.com>
Date:   Fri Jan 10 14:44:28 2020 -0500

    drm/amd/display: Refactor to remove diags specific rgam func
    
    [Why]
    There is duplicate code for building the regamma curve.
    
    [How]
    Remove the duplicate code and use the same function for building the
    regamma curve.
    
    Signed-off-by: Anthony Koo <Anthony.Koo at amd.com>
    Reviewed-by: Aric Cyr <Aric.Cyr at amd.com>
    Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 1b278c42809a..cac09d500fda 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1673,129 +1673,6 @@ static bool map_regamma_hw_to_x_user(
 
 #define _EXTRA_POINTS 3
 
-bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
-		const struct freesync_hdr_tf_params *fs_params)
-{
-	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
-	struct dividers dividers;
-
-	struct pwl_float_data *rgb_user = NULL;
-	struct pwl_float_data_ex *rgb_regamma = NULL;
-	struct gamma_pixel *axis_x = NULL;
-	struct pixel_gamma_point *coeff = NULL;
-	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
-	bool ret = false;
-
-	if (output_tf->type == TF_TYPE_BYPASS)
-		return false;
-
-	/* we can use hardcoded curve for plain SRGB TF */
-	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
-			output_tf->tf == TRANSFER_FUNCTION_SRGB) {
-		if (ramp == NULL)
-			return true;
-		if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
-				(!mapUserRamp && ramp->type == GAMMA_RGB_256))
-			return true;
-	}
-
-	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-
-	if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
-			(mapUserRamp || ramp->type != GAMMA_RGB_256)) {
-		rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
-			    sizeof(*rgb_user),
-			    GFP_KERNEL);
-		if (!rgb_user)
-			goto rgb_user_alloc_fail;
-
-		axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
-				GFP_KERNEL);
-		if (!axis_x)
-			goto axis_x_alloc_fail;
-
-		dividers.divider1 = dc_fixpt_from_fraction(3, 2);
-		dividers.divider2 = dc_fixpt_from_int(2);
-		dividers.divider3 = dc_fixpt_from_fraction(5, 2);
-
-		build_evenly_distributed_points(
-				axis_x,
-				ramp->num_entries,
-				dividers);
-
-		if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
-			scale_gamma(rgb_user, ramp, dividers);
-		else if (ramp->type == GAMMA_RGB_FLOAT_1024)
-			scale_gamma_dx(rgb_user, ramp, dividers);
-	}
-
-	rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-			       sizeof(*rgb_regamma),
-			       GFP_KERNEL);
-	if (!rgb_regamma)
-		goto rgb_regamma_alloc_fail;
-
-	coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
-			 GFP_KERNEL);
-	if (!coeff)
-		goto coeff_alloc_fail;
-
-	tf = output_tf->tf;
-	if (tf == TRANSFER_FUNCTION_PQ) {
-		tf_pts->end_exponent = 7;
-		tf_pts->x_point_at_y1_red = 125;
-		tf_pts->x_point_at_y1_green = 125;
-		tf_pts->x_point_at_y1_blue = 125;
-
-		build_pq(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x,
-				output_tf->sdr_ref_white_level);
-	} else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
-			fs_params != NULL && fs_params->skip_tm == 0) {
-		build_freesync_hdr(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x,
-				fs_params);
-	} else if (tf == TRANSFER_FUNCTION_HLG) {
-		build_freesync_hdr(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x,
-				fs_params);
-
-	} else {
-		tf_pts->end_exponent = 0;
-		tf_pts->x_point_at_y1_red = 1;
-		tf_pts->x_point_at_y1_green = 1;
-		tf_pts->x_point_at_y1_blue = 1;
-
-		build_regamma(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x, tf);
-	}
-	map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
-			coordinates_x, axis_x, rgb_regamma,
-			MAX_HW_POINTS, tf_pts,
-			(mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
-			(ramp && ramp->type != GAMMA_CS_TFM_1D));
-
-	if (ramp && ramp->type == GAMMA_CS_TFM_1D)
-		apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
-
-	ret = true;
-
-	kvfree(coeff);
-coeff_alloc_fail:
-	kvfree(rgb_regamma);
-rgb_regamma_alloc_fail:
-	kvfree(axis_x);
-axis_x_alloc_fail:
-	kvfree(rgb_user);
-rgb_user_alloc_fail:
-	return ret;
-}
-
 bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
 		const struct regamma_lut *regamma)
 {
@@ -2043,14 +1920,14 @@ rgb_user_alloc_fail:
 	return ret;
 }
 
-
-bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+static bool calculate_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points,
+				struct pwl_float_data_ex *rgb_regamma,
+				const struct freesync_hdr_tf_params *fs_params,
 				uint32_t sdr_ref_white_level)
 {
 	uint32_t i;
 	bool ret = false;
-	struct pwl_float_data_ex *rgb_regamma = NULL;
 
 	if (trans == TRANSFER_FUNCTION_UNITY ||
 		trans == TRANSFER_FUNCTION_LINEAR) {
@@ -2060,68 +1937,33 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 		points->x_point_at_y1_blue = 1;
 
 		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = coordinates_x[i].x;
-			points->green[i]  = coordinates_x[i].x;
-			points->blue[i]   = coordinates_x[i].x;
+			rgb_regamma[i].r = coordinates_x[i].x;
+			rgb_regamma[i].g = coordinates_x[i].x;
+			rgb_regamma[i].b = coordinates_x[i].x;
 		}
+
 		ret = true;
 	} else if (trans == TRANSFER_FUNCTION_PQ) {
-		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-				       sizeof(*rgb_regamma),
-				       GFP_KERNEL);
-		if (!rgb_regamma)
-			goto rgb_regamma_alloc_fail;
 		points->end_exponent = 7;
 		points->x_point_at_y1_red = 125;
 		points->x_point_at_y1_green = 125;
 		points->x_point_at_y1_blue = 125;
 
-
 		build_pq(rgb_regamma,
 				MAX_HW_POINTS,
 				coordinates_x,
 				sdr_ref_white_level);
-		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = rgb_regamma[i].r;
-			points->green[i]  = rgb_regamma[i].g;
-			points->blue[i]   = rgb_regamma[i].b;
-		}
-		ret = true;
-
-		kvfree(rgb_regamma);
-	} else if (trans == TRANSFER_FUNCTION_SRGB ||
-		trans == TRANSFER_FUNCTION_BT709 ||
-		trans == TRANSFER_FUNCTION_GAMMA22 ||
-		trans == TRANSFER_FUNCTION_GAMMA24 ||
-		trans == TRANSFER_FUNCTION_GAMMA26) {
-		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-				       sizeof(*rgb_regamma),
-				       GFP_KERNEL);
-		if (!rgb_regamma)
-			goto rgb_regamma_alloc_fail;
-		points->end_exponent = 0;
-		points->x_point_at_y1_red = 1;
-		points->x_point_at_y1_green = 1;
-		points->x_point_at_y1_blue = 1;
 
-		build_regamma(rgb_regamma,
+		ret = true;
+	} else if (trans == TRANSFER_FUNCTION_GAMMA22 &&
+			fs_params != NULL && fs_params->skip_tm == 0) {
+		build_freesync_hdr(rgb_regamma,
 				MAX_HW_POINTS,
 				coordinates_x,
-				trans);
-		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = rgb_regamma[i].r;
-			points->green[i]  = rgb_regamma[i].g;
-			points->blue[i]   = rgb_regamma[i].b;
-		}
-		ret = true;
+				fs_params);
 
-		kvfree(rgb_regamma);
+		ret = true;
 	} else if (trans == TRANSFER_FUNCTION_HLG) {
-		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-				       sizeof(*rgb_regamma),
-				       GFP_KERNEL);
-		if (!rgb_regamma)
-			goto rgb_regamma_alloc_fail;
 		points->end_exponent = 4;
 		points->x_point_at_y1_red = 12;
 		points->x_point_at_y1_green = 12;
@@ -2131,18 +1973,127 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 				MAX_HW_POINTS,
 				coordinates_x,
 				80, 1000);
-		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = rgb_regamma[i].r;
-			points->green[i]  = rgb_regamma[i].g;
-			points->blue[i]   = rgb_regamma[i].b;
-		}
+
+		ret = true;
+	} else {
+		// trans == TRANSFER_FUNCTION_SRGB
+		// trans == TRANSFER_FUNCTION_BT709
+		// trans == TRANSFER_FUNCTION_GAMMA22
+		// trans == TRANSFER_FUNCTION_GAMMA24
+		// trans == TRANSFER_FUNCTION_GAMMA26
+		points->end_exponent = 0;
+		points->x_point_at_y1_red = 1;
+		points->x_point_at_y1_green = 1;
+		points->x_point_at_y1_blue = 1;
+
+		build_regamma(rgb_regamma,
+				MAX_HW_POINTS,
+				coordinates_x,
+				trans);
+
 		ret = true;
-		kvfree(rgb_regamma);
 	}
-rgb_regamma_alloc_fail:
+
 	return ret;
 }
 
+bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+		const struct freesync_hdr_tf_params *fs_params)
+{
+	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+	struct dividers dividers;
+
+	struct pwl_float_data *rgb_user = NULL;
+	struct pwl_float_data_ex *rgb_regamma = NULL;
+	struct gamma_pixel *axis_x = NULL;
+	struct pixel_gamma_point *coeff = NULL;
+	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+	bool ret = false;
+
+	if (output_tf->type == TF_TYPE_BYPASS)
+		return false;
+
+	/* we can use hardcoded curve for plain SRGB TF */
+	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+			output_tf->tf == TRANSFER_FUNCTION_SRGB) {
+		if (ramp == NULL)
+			return true;
+		if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
+				(!mapUserRamp && ramp->type == GAMMA_RGB_256))
+			return true;
+	}
+
+	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+	if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
+			(mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+		rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+			    sizeof(*rgb_user),
+			    GFP_KERNEL);
+		if (!rgb_user)
+			goto rgb_user_alloc_fail;
+
+		axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
+				GFP_KERNEL);
+		if (!axis_x)
+			goto axis_x_alloc_fail;
+
+		dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+		dividers.divider2 = dc_fixpt_from_int(2);
+		dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+		build_evenly_distributed_points(
+				axis_x,
+				ramp->num_entries,
+				dividers);
+
+		if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+			scale_gamma(rgb_user, ramp, dividers);
+		else if (ramp->type == GAMMA_RGB_FLOAT_1024)
+			scale_gamma_dx(rgb_user, ramp, dividers);
+	}
+
+	rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+			       sizeof(*rgb_regamma),
+			       GFP_KERNEL);
+	if (!rgb_regamma)
+		goto rgb_regamma_alloc_fail;
+
+	coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+			 GFP_KERNEL);
+	if (!coeff)
+		goto coeff_alloc_fail;
+
+	tf = output_tf->tf;
+
+	ret = calculate_curve(tf,
+			tf_pts,
+			rgb_regamma,
+			fs_params,
+			output_tf->sdr_ref_white_level);
+
+	if (ret) {
+		map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+				coordinates_x, axis_x, rgb_regamma,
+				MAX_HW_POINTS, tf_pts,
+				(mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
+				(ramp && ramp->type != GAMMA_CS_TFM_1D));
+
+		if (ramp && ramp->type == GAMMA_CS_TFM_1D)
+			apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
+	}
+
+	kvfree(coeff);
+coeff_alloc_fail:
+	kvfree(rgb_regamma);
+rgb_regamma_alloc_fail:
+	kvfree(axis_x);
+axis_x_alloc_fail:
+	kvfree(rgb_user);
+rgb_user_alloc_fail:
+	return ret;
+}
 
 bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 44ddea58523a..9994817a9a03 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -103,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
 		const struct dc_gamma *ramp, bool mapUserRamp);
 
-bool mod_color_calculate_curve(enum dc_transfer_func_predefined  trans,
-		struct dc_transfer_func_distributed_points *points,
-		uint32_t sdr_ref_white_level);
-
 bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points);
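
After the refactor, both the diags path and the driver path funnel through one curve builder. Condensed, the dispatch inside calculate_curve() looks like this (a sketch, not the literal code):

    /* UNITY/LINEAR -> identity points
     * PQ           -> build_pq()
     * GAMMA22 with valid fs_params (skip_tm == 0) -> build_freesync_hdr()
     * HLG          -> the HLG curve builder
     * everything else (SRGB/BT709/GAMMA22/24/26)  -> build_regamma()
     *
     * The caller allocates rgb_regamma once and, on success, runs
     * map_regamma_hw_to_x_user() exactly once -- the per-branch copy
     * loops and allocations from the old code are gone.
     */
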
 
commit 819d4b3fbfaf40b4537e80b01aef612e4140a91b
Author: Jerry (Fangzhi) Zuo <Jerry.Zuo at amd.com>
Date:   Thu Dec 12 10:37:35 2019 -0500

    drm/amd/display: Fix DML dummyinteger types mismatch
    
    [Why]
    The types for dummyinteger1 and dummyinteger2 are unsigned
    as part of the DML spec. They should not be long.
    
    [How]
    Make them unsigned int instead of long.
    
    Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo at amd.com>
    Reviewed-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index e7a44df676ca..2875efd85467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -763,8 +763,8 @@ struct vba_vars_st {
 	double SwathWidthC[DC__NUM_DPP__MAX];
 	unsigned int BytePerPixelY[DC__NUM_DPP__MAX];
 	unsigned int BytePerPixelC[DC__NUM_DPP__MAX];
-	long dummyinteger1;
-	long dummyinteger2;
+	unsigned int dummyinteger1;
+	unsigned int dummyinteger2;
 	double FinalDRAMClockChangeLatency;
 	double Tdmdl_vm[DC__NUM_DPP__MAX];
 	double Tdmdl[DC__NUM_DPP__MAX];
commit bb6785c1212988c76c21cd1d4647c31545231e8b
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Fri Jan 10 09:17:58 2020 -0500

    drm/amd/display: Do DMCUB hw_init before DC
    
    [Why]
    For DMCUB-enabled hardware, DC depends on the DMCUB already running.
    
    Command table offloading will fail on first modeset if DMCUB isn't
    initialized first.
    
    [How]
    Perform DMCUB hardware initialization before DC.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Hersen Wu <hersenxs.wu at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9402374d2466..3af014fcdcc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -940,14 +940,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
 
-	dc_hardware_init(adev->dm.dc);
-
 	r = dm_dmub_hw_init(adev);
 	if (r) {
 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 		goto error;
 	}
 
+	dc_hardware_init(adev->dm.dc);
+
 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
 	if (!adev->dm.freesync_module) {
 		DRM_ERROR(
commit 5ea3985098048ba6850547c816208e5c2a40bef4
Author: Haiyi Zhou <haiyi.zhou at amd.com>
Date:   Thu Jan 9 11:08:33 2020 -0500

    drm/amd/display: Fixed comment styling
    
    Switched to C-style comments for consistency
    
    Signed-off-by: Haiyi Zhou <haiyi.zhou at amd.com>
    Reviewed-by: Reza Amini <Reza.Amini at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 6e5ecefe7d9d..b9992ebf77a6 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -381,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
 	bool update = false;
 	unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 
-	//Compute the exit refresh rate and exit frame duration
+	/* Compute the exit refresh rate and exit frame duration */
 	unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
 			+ (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
 	unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
commit 14ae7665e70a982b3653f2aa024251d69d89f81c
Author: Paul Hsieh <paul.hsieh at amd.com>
Date:   Thu Jan 9 15:11:06 2020 +0800

    drm/amd/display: check pipe_ctx is split pipe or not
    
    [Why]
    Using pipe_ctx to reallocate the payload may cause the payload to be
    allocated twice on the same sink when a pipe is split.
    
    [How]
    The driver must check whether pipe_ctx is a split pipe to avoid
    reallocating the payload twice on the same sink.
    
    Signed-off-by: Paul Hsieh <paul.hsieh at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 260c0b62d37d..a50768a7ba68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2882,7 +2882,16 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
 	// Clear all of MST payload then reallocate
 	for (i = 0; i < MAX_PIPES; i++) {
 		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-		if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+
+		/* The driver enables split pipes for external monitors,
+		 * so we have to check whether pipe_ctx is a split pipe.
+		 * If it is, use the top pipe to reallocate.
+		 */
+		if (!pipe_ctx || pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
 				pipe_ctx->stream->dpms_off == false &&
 				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 			deallocate_mst_payload(pipe_ctx);
@@ -2891,7 +2900,11 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-		if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+
+		if (!pipe_ctx || pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
 				pipe_ctx->stream->dpms_off == false &&
 				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 			/* enable/disable PHY will clear connection between BE and FE
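
The check leans on DC's pipe-tree invariant: only the topmost pipe of a split set has top_pipe == NULL, while each split-off pipe points back at the pipe above it. Acting only on top pipes yields exactly one payload (de)allocation per MST stream:

    if (!pipe_ctx || pipe_ctx->top_pipe)
        continue;   /* secondary half of a split pipe: same stream, skip */
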
commit 830821a64db63026a8b7414b2b539f262821ad23
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Mon Jan 6 13:21:48 2020 -0500

    drm/amd/display: Fallback to DMCUB when command table is missing
    
    [Why]
    If the command table isn't available, we can fall back to DMCUB
    offloading when it's enabled and available.
    
    [How]
    Instead of assigning NULL for unsupported command table functions, we
    can fall back to the DMCUB when it's available.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Wesley Chalmers <Wesley.Chalmers at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2cb7a4288cb7..c4ba6e84db65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -89,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5(
 	struct bios_parser *bp,
 	struct bp_encoder_control *cntl);
 
+static enum bp_result encoder_control_fallback(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
 static void init_dig_encoder_control(struct bios_parser *bp)
 {
 	uint32_t version =
@@ -100,7 +104,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
 		break;
 	default:
 		dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
-		bp->cmd_tbl.dig_encoder_control = NULL;
+		bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
 		break;
 	}
 }
@@ -184,6 +188,18 @@ static enum bp_result encoder_control_digx_v1_5(
 	return result;
 }
 
+static enum bp_result encoder_control_fallback(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return encoder_control_digx_v1_5(bp, cntl);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /*****************************************************************************
  ******************************************************************************
  **
@@ -196,6 +212,10 @@ static enum bp_result transmitter_control_v1_6(
 	struct bios_parser *bp,
 	struct bp_transmitter_control *cntl);
 
+static enum bp_result transmitter_control_fallback(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+
 static void init_transmitter_control(struct bios_parser *bp)
 {
 	uint8_t frev;
@@ -209,7 +229,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 		break;
 	default:
 		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
-		bp->cmd_tbl.transmitter_control = NULL;
+		bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
 		break;
 	}
 }
@@ -273,6 +293,18 @@ static enum bp_result transmitter_control_v1_6(
 	return result;
 }
 
+static enum bp_result transmitter_control_fallback(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return transmitter_control_v1_6(bp, cntl);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /******************************************************************************
  ******************************************************************************
  **
@@ -285,6 +317,10 @@ static enum bp_result set_pixel_clock_v7(
 	struct bios_parser *bp,
 	struct bp_pixel_clock_parameters *bp_params);
 
+static enum bp_result set_pixel_clock_fallback(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
 static void init_set_pixel_clock(struct bios_parser *bp)
 {
 	switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
@@ -294,7 +330,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
 	default:
 		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
 			 BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
-		bp->cmd_tbl.set_pixel_clock = NULL;
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
 		break;
 	}
 }
@@ -400,6 +436,18 @@ static enum bp_result set_pixel_clock_v7(
 	return result;
 }
 
+static enum bp_result set_pixel_clock_fallback(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return set_pixel_clock_v7(bp, bp_params);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /******************************************************************************
  ******************************************************************************
  **
@@ -632,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1(
 	enum controller_id crtc_id,
 	enum bp_pipe_control_action action);
 
+static enum bp_result enable_disp_power_gating_fallback(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action);
+
 static void init_enable_disp_power_gating(
 	struct bios_parser *bp)
 {
@@ -643,7 +696,7 @@ static void init_enable_disp_power_gating(
 	default:
 		dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
 			 BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
-		bp->cmd_tbl.enable_disp_power_gating = NULL;
+		bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback;
 		break;
 	}
 }
@@ -695,6 +748,19 @@ static enum bp_result enable_disp_power_gating_v2_1(
 	return result;
 }
 
+static enum bp_result enable_disp_power_gating_fallback(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return enable_disp_power_gating_v2_1(bp, crtc_id, action);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /******************************************************************************
 *******************************************************************************
  **
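
All four command tables get the same fallback shape; generalized (names hypothetical):

    static enum bp_result some_control_fallback(struct bios_parser *bp, ...)
    {
        /* With a DMCUB service present and command-table offloading
         * enabled, the newest table revision routes the call to the
         * DMCUB; otherwise fail exactly as the old NULL pointer did,
         * minus the risk of a NULL dereference.
         */
        if (bp->base.ctx->dc->ctx->dmub_srv &&
            bp->base.ctx->dc->debug.dmub_command_table)
            return some_control_latest_revision(bp, ...);

        return BP_RESULT_FAILURE;
    }
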
commit 153ca760219277997e2c3146ae9a15c979c50520
Author: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
Date:   Mon Jan 6 10:29:13 2020 -0500

    drm/amd/display: Get fb base and fb offset for DMUB from registers
    
    [Why]
    Under some hardware initialization sequences the fb base/fb offset
    provided can be zero, or hardware init can happen too late.
    
    We want to ensure that we always have the correct fb_base/fb_offset
    when performing DMCUB hardware initialization so we can do DMCUB
    command table offloading during first dc hardware init.
    
    [How]
    Read from the DCN registers. VBIOS already filled these in for us.
    
    Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas at amd.com>
    Reviewed-by: Wesley Chalmers <Wesley.Chalmers at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index 8e23a7017588..287fb9a36a64 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -231,6 +231,8 @@ struct dmub_srv_base_funcs {
 struct dmub_srv_hw_funcs {
 	/* private: internal use only */
 
+	void (*init)(struct dmub_srv *dmub);
+
 	void (*reset)(struct dmub_srv *dmub);
 
 	void (*reset_release)(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index cd51c6138894..9229012b93e2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -54,6 +54,19 @@ const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
 
 /* Shared functions. */
 
+static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
+					  uint64_t *fb_base,
+					  uint64_t *fb_offset)
+{
+	uint32_t tmp;
+
+	REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
+	*fb_base = (uint64_t)tmp << 24;
+
+	REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
+	*fb_offset = (uint64_t)tmp << 24;
+}
+
 static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
 					     uint64_t fb_base,
 					     uint64_t fb_offset,
@@ -82,7 +95,9 @@ void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
 			      const struct dmub_window *cw1)
 {
 	union dmub_addr offset;
-	uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset;
+	uint64_t fb_base, fb_offset;
+
+	dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
 
 	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
 	REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3,
@@ -118,7 +133,9 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
 			      const struct dmub_window *cw6)
 {
 	union dmub_addr offset;
-	uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset;
+	uint64_t fb_base, fb_offset;
+
+	dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
 
 	dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
 
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 53bfd4da69ad..04b0fa13153d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -92,7 +92,9 @@ struct dmub_srv;
 	DMUB_SR(DMCUB_SCRATCH14) \
 	DMUB_SR(DMCUB_SCRATCH15) \
 	DMUB_SR(CC_DC_PIPE_DIS) \
-	DMUB_SR(MMHUBBUB_SOFT_RESET)
+	DMUB_SR(MMHUBBUB_SOFT_RESET) \
+	DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
+	DMUB_SR(DCN_VM_FB_OFFSET)
 
 #define DMUB_COMMON_FIELDS() \
 	DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
@@ -121,7 +123,9 @@ struct dmub_srv;
 	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
 	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
 	DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
-	DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET)
+	DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
+	DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
+	DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
 
 struct dmub_srv_common_reg_offset {
 #define DMUB_SR(reg) uint32_t reg;
commit 07d4f905329438b8e4daf7f58f5afea7d781a9b2
Author: Wenjing Liu <Wenjing.Liu at amd.com>
Date:   Tue Jan 7 16:33:32 2020 -0500

    drm/amd/display: Add debug option to disable DSC support
    
    [how]
    Return empty DSC encoder caps when the debug option to disable DSC
    is set.
    
    Signed-off-by: Wenjing Liu <Wenjing.Liu at amd.com>
    Reviewed-by: Nikola Cornij <Nikola.Cornij at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3fa85a54360f..7d31dcb9e37f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -425,6 +425,7 @@ struct dc_debug_options {
 	bool validate_dml_output;
 	bool enable_dmcub_surface_flip;
 	bool usbc_combo_phy_reset_wa;
+	bool disable_dsc;
 };
 
 struct dc_debug_data {
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 8b78fcbfe746..87d682d25278 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -224,7 +224,8 @@ static void get_dsc_enc_caps(
 
 	memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
 	if (dsc) {
-		dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+		if (!dsc->ctx->dc->debug.disable_dsc)
+			dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
 		if (dsc->ctx->dc->debug.native422_support)
 			dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
 	}
commit 50d2c6027ef4d09d7c2a9d12b2a7957576c0fa3c
Author: Sung Lee <sung.lee at amd.com>
Date:   Fri Jan 3 14:03:52 2020 -0500

    drm/amd/display: Do not send training pattern if VS Different
    
    [Why]
    The DP 1.4a spec requires that the training pattern be sent only
    under certain specific conditions. Currently the driver re-sends the
    training pattern every time the voltage swing value changes, but
    that should not be the case.
    
    [How]
    Do not re-send the training pattern every time the VS values
    differ. Only send it on the first iteration.
    
    Signed-off-by: Sung Lee <sung.lee at amd.com>
    Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
    Acked-by: Abdoulaye Berthe <Abdoulaye.Berthe at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 1bd0946829e3..3bb1b481451b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -983,7 +983,7 @@ static enum link_training_result perform_clock_recovery_sequence(
 				offset);
 
 		/* 2. update DPCD of the receiver*/
-		if (!retries_cr)
+		if (!retry_count)
 			/* EPR #361076 - write as a 5-byte burst,
 			 * but only for the 1-st iteration.*/
 			dpcd_set_lt_pattern_and_lane_settings(
commit 43563bc2e6a769502d23f4ec9cd590e4636cf0ea
Author: Wenjing Liu <Wenjing.Liu at amd.com>
Date:   Mon Dec 23 16:02:13 2019 -0500

    drm/amd/display: update MSA and VSC SDP on video test pattern request
    
    [why]
    On a video test pattern request we need to update the MSA and VSC so
    they match the requested test pattern's dynamic range field.
    
    [how]
    Update the dynamic range field in the MSA and disable the VSC, since
    updating the VSC info packet is complicated and not required for
    test pattern purposes.
    
    Signed-off-by: Wenjing Liu <Wenjing.Liu at amd.com>
    Reviewed-by: Nikola Cornij <Nikola.Cornij at amd.com>
    Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6ab298c65247..1bd0946829e3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3925,8 +3925,43 @@ bool dc_link_dp_set_test_pattern(
 					     sizeof(training_pattern));
 		}
 	} else {
-	/* CRTC Patterns */
+		enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+		struct encoder_info_frame info_frame = pipe_ctx->stream_res.encoder_info_frame;
+
+		switch (test_pattern_color_space) {
+		case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+			color_space = COLOR_SPACE_SRGB;
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				color_space = COLOR_SPACE_SRGB_LIMITED;
+			break;
+
+		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+			color_space = COLOR_SPACE_YCBCR601;
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				color_space = COLOR_SPACE_YCBCR601_LIMITED;
+			break;
+		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+			color_space = COLOR_SPACE_YCBCR709;
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				color_space = COLOR_SPACE_YCBCR709_LIMITED;
+			break;
+		default:
+			break;
+		}
+		/* update MSA to requested color space */
+		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+				&pipe_ctx->stream->timing,
+				color_space, false, link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
+		/* disable vsc so no need to update it based on request */
+		info_frame.vsc.valid = false;
+		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+				pipe_ctx->stream_res.stream_enc,
+				&info_frame);
+
+		/* CRTC Patterns */
 		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+
 		/* Set Test Pattern state */
 		link->test_pattern_enabled = true;
 	}
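
The mapping in the hunk above follows one simple rule: the requested DP test-pattern color space selects the matching dc color space, and the CEA color-squares pattern additionally forces the limited-range variant. A compact restatement as a hypothetical helper (enum values come from the diff; the parameter type names are assumptions, and this is a sketch, not the kernel code):

    static enum dc_color_space
    test_pattern_to_color_space(enum dp_test_pattern_color_space cs,
                                enum dp_test_pattern pattern)
    {
        /* CEA color squares are defined for limited (video) range. */
        bool limited = (pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA);

        switch (cs) {
        case DP_TEST_PATTERN_COLOR_SPACE_RGB:
            return limited ? COLOR_SPACE_SRGB_LIMITED : COLOR_SPACE_SRGB;
        case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
            return limited ? COLOR_SPACE_YCBCR601_LIMITED : COLOR_SPACE_YCBCR601;
        case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
            return limited ? COLOR_SPACE_YCBCR709_LIMITED : COLOR_SPACE_YCBCR709;
        default:
            return COLOR_SPACE_UNKNOWN;
        }
    }
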
commit 63e3ab9a8226c0271e823f32fc1c3de67d142c78
Author: Nirmoy Das <nirmoy.das at amd.com>
Date:   Tue Jan 21 13:29:20 2020 +0100

    drm/amdgpu: individualize fence allocation per entity
    
    Allocate fences for each entity and remove the ctx->fences reference,
    as fences should be bound to amdgpu_ctx_entity instead of amdgpu_ctx.
    
    Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 64e2babbc36e..05c2af61e7de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -87,24 +87,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 
-	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
-			      sizeof(struct dma_fence*), GFP_KERNEL);
-	if (!ctx->fences)
-		return -ENOMEM;
 
 	ctx->entities[0] = kcalloc(num_entities,
 				   sizeof(struct amdgpu_ctx_entity),
 				   GFP_KERNEL);
-	if (!ctx->entities[0]) {
-		r = -ENOMEM;
-		goto error_free_fences;
-	}
+	if (!ctx->entities[0])
+		return -ENOMEM;
+
 
 	for (i = 0; i < num_entities; ++i) {
 		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
 
 		entity->sequence = 1;
-		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
+		entity->fences = kcalloc(amdgpu_sched_jobs,
+					 sizeof(struct dma_fence*), GFP_KERNEL);
+		if (!entity->fences) {
+			r = -ENOMEM;
+			goto error_cleanup_memory;
+		}
 	}
 	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
 		ctx->entities[i] = ctx->entities[i - 1] +
@@ -181,11 +181,17 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 error_cleanup_entities:
 	for (i = 0; i < num_entities; ++i)
 		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
-	kfree(ctx->entities[0]);
 
-error_free_fences:
-	kfree(ctx->fences);
-	ctx->fences = NULL;
+error_cleanup_memory:
+	for (i = 0; i < num_entities; ++i) {
+		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+		kfree(entity->fences);
+		entity->fences = NULL;
+	}
+
+	kfree(ctx->entities[0]);
+	ctx->entities[0] = NULL;
 	return r;
 }
 
@@ -199,12 +205,16 @@ static void amdgpu_ctx_fini(struct kref *ref)
 	if (!adev)
 		return;
 
-	for (i = 0; i < num_entities; ++i)
+	for (i = 0; i < num_entities; ++i) {
+		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			dma_fence_put(ctx->entities[0][i].fences[j]);
-	kfree(ctx->fences);
-	kfree(ctx->entities[0]);
+			dma_fence_put(entity->fences[j]);
 
+		kfree(entity->fences);
+	}
+
+	kfree(ctx->entities[0]);
 	mutex_destroy(&ctx->lock);
 
 	kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 4ad90a44dc3c..a6cd9d4b078c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -42,7 +42,6 @@ struct amdgpu_ctx {
 	unsigned			reset_counter_query;
 	uint32_t			vram_lost_counter;
 	spinlock_t			ring_lock;
-	struct dma_fence		**fences;
 	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
 	bool				preamble_presented;
 	enum drm_sched_priority		init_priority;
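
With this change each amdgpu_ctx_entity owns its own fence ring instead of a slice of one context-wide array, and the patch relies on the context being zeroed up front plus kfree(NULL) being a no-op to unwind partial allocations. A self-contained sketch of the allocate-or-unwind pattern (generic names, not the kernel structs; needs <linux/slab.h>):

    /* Sketch: give each of n entities its own array of fence slots; on
     * failure, free whatever was already allocated. The kernel patch
     * instead kfree()s every entity's pointer on the error path, which
     * works because kfree(NULL) is a no-op. */
    static int alloc_entity_fences(struct dma_fence **fences[],
                                   unsigned int n, unsigned int slots)
    {
        unsigned int i;

        for (i = 0; i < n; ++i) {
            fences[i] = kcalloc(slots, sizeof(struct dma_fence *),
                                GFP_KERNEL);
            if (!fences[i])
                goto err_unwind;
        }
        return 0;

    err_unwind:
        while (i--) {
            kfree(fences[i]);
            fences[i] = NULL;
        }
        return -ENOMEM;
    }
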
commit 7db1d560a4fab1e4668b1964e77f52cefce9e9c7
Author: Tianci.Yin <tianci.yin at amd.com>
Date:   Mon Jan 20 17:20:10 2020 +0800

    Revert "drm/amdgpu: fix modprobe failure of the secondary GPU when GDDR6 training enabled(V5)"
    
    This reverts commit 9e441478623fd913d4340654682b19f0c24e629d.
    
    The patch will be replaced with a better solution, so revert it.
    
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Tianci.Yin <tianci.yin at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 86267baca07c..d3c27a3c43f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -60,11 +60,6 @@
  */
 #define AMDGPU_GMC_FAULT_TIMEOUT	5000ULL
 
-/*
- * Default stolen memory size, 1024 * 768 * 4
- */
-#define AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE	0x300000ULL
-
 struct firmware;
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 86f4ffe408e7..0c5bf3bd640f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -641,12 +641,7 @@ static int gmc_v10_0_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	/*
-	 * Can't free the stolen VGA memory when it might be used for memory
-	 * training again.
-	 */
-	if (!adev->fw_vram_usage.mem_train_support)
-		amdgpu_bo_late_init(adev);
+	amdgpu_bo_late_init(adev);
 
 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
 	if (r)
@@ -830,19 +825,6 @@ static int gmc_v10_0_sw_init(void *handle)
 
 	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
 
-	/*
-	 * In dual GPUs scenario, stolen_size is assigned to zero on the
-	 * secondary GPU, since there is no pre-OS console using that memory.
-	 * Then the bottom region of VRAM was allocated as GTT, unfortunately a
-	 * small region of bottom VRAM was encroached by UMC firmware during
-	 * GDDR6 BIST training, this cause page fault.
-	 * The page fault can be fixed by forcing stolen_size to 3MB, then the
-	 * bottom region of VRAM was allocated as stolen memory, GTT corruption
-	 * avoid.
-	 */
-	adev->gmc.stolen_size = max(adev->gmc.stolen_size,
-				    AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE);
-
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)
@@ -882,13 +864,6 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
 static int gmc_v10_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	void *stolen_vga_buf;
-
-	/*
-	 * Free the stolen memory if it wasn't already freed in late_init
-	 * because of memory training.
-	 */
-	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
 
 	amdgpu_vm_manager_fini(adev);
 	gmc_v10_0_gart_fini(adev);
commit 240c811ccde49ad336b23c582018c6001690f19d
Author: Tianci.Yin <tianci.yin at amd.com>
Date:   Mon Jan 20 15:08:35 2020 +0800

    drm/amdgpu: fix VRAM partially encroached issue in GDDR6 memory training(V2)
    
    [why]
    In GDDR6 BIST training, a certain amount of bottom VRAM is encroached
    on by the UMC, which causes problems (such as GTT corruption and
    observed page faults).
    
    [how]
    Save the content of this bottom VRAM to system memory before training,
    and restore it after training, to avoid VRAM corruption.
    
    Acked-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Tianci.Yin <tianci.yin at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3265487b859f..611021514c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -172,6 +172,8 @@ struct psp_dtm_context {
 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES	0x1000
 #define GDDR6_MEM_TRAINING_OFFSET		0x8000
+/* Define the VRAM size that will be encroached on by BIST training. */
+#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE	0x2000000
 
 enum psp_memory_training_init_flag {
 	PSP_MEM_TRAIN_NOT_SUPPORT	= 0x0,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 685dd9754c67..ac173d2eb809 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -971,10 +971,13 @@ Err_out:
  */
 static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 {
-	int ret;
-	uint32_t p2c_header[4];
 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 	uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+	struct amdgpu_device *adev = psp->adev;
+	uint32_t p2c_header[4];
+	uint32_t sz;
+	void *buf;
+	int ret;
 
 	if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
 		DRM_DEBUG("Memory training is not supported.\n");
@@ -989,7 +992,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 		return 0;
 	}
 
-	amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+	amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
 	DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
 		  pcache[0], pcache[1], pcache[2], pcache[3],
 		  p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
@@ -1026,11 +1029,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 	DRM_DEBUG("Memory training ops:%x.\n", ops);
 
 	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+		/*
+		 * Long training will encroach on a certain amount of bottom
+		 * VRAM; save the content of this bottom VRAM to system memory
+		 * before training and restore it after training to avoid
+		 * VRAM corruption.
+		 */
+		sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+
+		if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+			DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+				  adev->gmc.visible_vram_size,
+				  adev->mman.aper_base_kaddr);
+			return -EINVAL;
+		}
+
+		buf = vmalloc(sz);
+		if (!buf) {
+			DRM_ERROR("failed to allocate system memory.\n");
+			return -ENOMEM;
+		}
+
+		memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
 		ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
 		if (ret) {
 			DRM_ERROR("Send long training msg failed.\n");
+			vfree(buf);
 			return ret;
 		}
+
+		memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+		adev->nbio.funcs->hdp_flush(adev, NULL);
+		vfree(buf);
 	}
 
 	if (ops & PSP_MEM_TRAIN_SAVE) {
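
The long-training path above is an instance of a general save/destructive-op/restore pattern over an MMIO aperture: snapshot the low VRAM through the CPU-visible BAR mapping, run the training, then write the snapshot back and flush HDP so later accesses see the restored contents. A stripped-down sketch (names follow the diff; error handling abbreviated; needs <linux/vmalloc.h> and <linux/io.h>):

    /* Sketch: preserve the VRAM region that BIST training will clobber. */
    static int train_with_vram_preserved(struct amdgpu_device *adev,
                                         struct psp_context *psp, size_t sz)
    {
        void *buf = vmalloc(sz);
        int ret;

        if (!buf)
            return -ENOMEM;

        /* Snapshot bottom-of-VRAM through the CPU-visible aperture. */
        memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);

        ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
        if (!ret) {
            /* Put the clobbered contents back and flush HDP so the
             * restored data is visible past the host data path. */
            memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
            adev->nbio.funcs->hdp_flush(adev, NULL);
        }

        vfree(buf);
        return ret;
    }
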
commit a9d4fe2fd6529c3129ec6883b8649ea5c7eee4d3
Author: Nirmoy Das <nirmoy.das at amd.com>
Date:   Mon Jan 20 13:54:30 2020 +0100

    drm/amdgpu: remove unnecessary conversion to bool
    
    Better to clean that up before some automation starts complaining
    about it.
    
    Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
    Acked-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 82155ac3288a..12247a32f9ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle,
 				     enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = state == AMD_PG_STATE_GATE ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 
 	if (adev->powerplay.pp_funcs &&
 		adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 64b1b2d2d19b..990f8e64a4d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -985,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 {
 	struct sysinfo si;
-	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+	bool is_os_64 = (sizeof(void *) == 8);
 	uint64_t total_memory;
 	uint64_t dram_size_seven_GB = 0x1B8000000;
 	uint64_t dram_size_three_GB = 0xB8000000;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index d9cc746af5e6..847ca9b3ce4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		athub_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		athub_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index ceb9aa4df0e7..921a69abda55 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		athub_v2_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		athub_v2_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 65885ba92a5b..1785fdad6ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4229,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
@@ -4255,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		gfx_v10_0_update_gfx_clock_gating(adev,
-						 state == AMD_CG_STATE_GATE ? true : false);
+						 state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f8b1fd6ae6f4..6558d70fdc58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4652,7 +4652,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
@@ -4714,7 +4714,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
 	case CHIP_ARCTURUS:
 	case CHIP_RENOIR:
 		gfx_v9_0_update_gfx_clock_gating(adev,
-						 state == AMD_CG_STATE_GATE ? true : false);
+						 state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index a78292d84854..ff2e6e1ccde7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -690,7 +690,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		if (jpeg_v2_0_is_idle(handle))
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 2c58939e6ad0..c6d046df4b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -469,7 +469,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 	int i;
 
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index adfd8a6171eb..49a3a56ec017 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -523,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
 		mmhub_v1_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v1_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index a7cb185d639a..bde189680521 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		mmhub_v2_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v2_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index a32c9757711f..a5281df8d84f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -625,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
 	switch (adev->asic_type) {
 	case CHIP_ARCTURUS:
 		mmhub_v9_4_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v9_4_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index f737ce459c28..cf557a428298 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -426,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	navi10_ih_update_clockgating_state(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2e0f8933410e..2d1bebdf1603 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -950,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		nv_update_hdp_mem_power_gating(adev,
-				   state == AMD_CG_STATE_GATE ? true : false);
+				   state == AMD_CG_STATE_GATE);
 		nv_update_hdp_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 27c7001be1ee..e55884d204bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2176,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
 	case CHIP_ARCTURUS:
 	case CHIP_RENOIR:
 		sdma_v4_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		sdma_v4_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 4c6bf1f8a528..67b9830b7c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1525,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		sdma_v5_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		sdma_v5_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 9aac9f9c50bb..42d5601b6bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle,
 	bool enable;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 317803f6a561..af41ee4c9639 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1467,38 +1467,38 @@ static int soc15_common_set_clockgating_state(void *handle,
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_hdp_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_rom_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->df.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_hdp_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_rom_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	case CHIP_ARCTURUS:
 		soc15_update_hdp_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 01e62fb8e6e0..0fa8aae2d78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 217084d56ab8..e0aadcaf6c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 475ae68f38f5..217db187207c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 	int i;
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 683701cf7270..3fd102efb7af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 	int i;
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index e654938f6cca..1a24fadd30e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1346,7 +1346,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index f4db8af6536b..b8dc136d2a01 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1213,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c8b63d57a541..6970d3a1ae6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1663,7 +1663,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (amdgpu_sriov_vf(adev))
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index d9e331084ea0..407c6093c2ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -717,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	vega10_ih_update_clockgating_state(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 	return 0;
 
 }
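
All of these hunks apply the same identity: for a boolean-valued comparison e, the expression e ? true : false evaluates to exactly e, so the ternary is pure noise. In miniature:

    bool enable;

    enable = (state == AMD_CG_STATE_GATE) ? true : false; /* before */
    enable = (state == AMD_CG_STATE_GATE);                /* after, same value */
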
commit 4c461d89db4f8f40509b044b0daf3ac6edf4fbd7
Author: Dennis Li <Dennis.Li at amd.com>
Date:   Thu Jan 16 13:40:52 2020 +0800

    drm/amdgpu: add RAS support for the gfx block of Arcturus
    
    Implement functions to do RAS error injection and to query the EDC
    counters.
    
    Signed-off-by: Dennis Li <Dennis.Li at amd.com>
    Reviewed-by: Guchun Chen <guchun.chen at amd.com>
    Reviewed-by: Tao Zhou <tao.zhou1 at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

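For orientation before the large tables added below: each soc15_ras_field_entry pairs a counter register with the bit fields for SEC (single-bit, corrected) and DED (double-bit, detected) errors, and querying the EDC counters amounts to walking that table, reading every register instance, and accumulating both fields. A hedged sketch of that walk in plain C (the descriptor and reader are illustrative stand-ins, not the kernel's types):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-in for a RAS field descriptor: one counter
     * register plus the mask/shift of its SEC and DED bit fields. */
    struct ras_field {
        uint32_t reg;
        uint32_t sec_mask, sec_shift;
        uint32_t ded_mask, ded_shift;
    };

    static void count_edc_errors(const struct ras_field *tbl, size_t n,
                                 uint32_t (*read_reg)(uint32_t),
                                 unsigned long *sec, unsigned long *ded)
    {
        for (size_t i = 0; i < n; i++) {
            uint32_t v = read_reg(tbl[i].reg);

            *sec += (v & tbl[i].sec_mask) >> tbl[i].sec_shift;
            *ded += (v & tbl[i].ded_mask) >> tbl[i].ded_shift;
        }
    }
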
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7ae3b22c5628..c2bbcdd9c875 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -120,6 +120,7 @@ amdgpu-y += \
 	amdgpu_rlc.o \
 	gfx_v8_0.o \
 	gfx_v9_0.o \
+	gfx_v9_4.o \
 	gfx_v10_0.o
 
 # add async DMA block
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 583800ab5fe5..f8b1fd6ae6f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -48,6 +48,8 @@
 
 #include "amdgpu_ras.h"
 
+#include "gfx_v9_4.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -1950,6 +1952,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
 	.query_ras_error_count = &gfx_v9_0_query_ras_error_count
 };
 
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
+	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+	.select_se_sh = &gfx_v9_0_select_se_sh,
+	.read_wave_data = &gfx_v9_0_read_wave_data,
+	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+	.ras_error_inject = &gfx_v9_4_ras_error_inject,
+	.query_ras_error_count = &gfx_v9_4_query_ras_error_count
+};
+
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
@@ -2001,6 +2014,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_ARCTURUS:
+		adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4265,7 +4279,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		goto fail;
 	}
 
-	gfx_v9_0_clear_ras_edc_counter(adev);
+	switch (adev->asic_type)
+	{
+	case CHIP_VEGA20:
+		gfx_v9_0_clear_ras_edc_counter(adev);
+		break;
+	case CHIP_ARCTURUS:
+		gfx_v9_4_clear_ras_edc_counter(adev);
+		break;
+	default:
+		break;
+	}
 
 fail:
 	amdgpu_ib_free(adev, &ib, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 000000000000..e19d275f3f7d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
+
+#include "gc/gc_9_4_1_offset.h"
+#include "gc/gc_9_4_1_sh_mask.h"
+#include "soc15_common.h"
+
+#include "gfx_v9_4.h"
+#include "amdgpu_ras.h"
+
+static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
+	/* CPC */
+	{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
+	/* DC */
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
+	/* CPF */
+	{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
+	/* GDS */
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+	/* SPI */
+	{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
+	/* SQ */
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+	/* SQC */
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
+	/* TA */
+	{ SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
+	/* TCA */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
+	/* TCC */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
+	/* TCI */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
+	/* TCP */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 },
+	/* TD */
+	{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 },
+	/* GCEA */
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 },
+	/* RLC */
+	{ SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 },
+};
+
+static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+				  u32 sh_num, u32 instance)
+{
+	u32 data;
+
+	if (instance == 0xffffffff)
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+				     INSTANCE_BROADCAST_WRITES, 1);
+	else
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+				     instance);
+
+	if (se_num == 0xffffffff)
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+				     1);
+	else
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+	if (sh_num == 0xffffffff)
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+				     1);
+	else
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
+}
+
+static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = {
+	/* CPC */
+	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+	{ "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+	{ "CPC_DC_CSINVOC_RAM_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+	{ "CPC_DC_RESTORE_RAM_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+	{ "CPC_DC_CSINVOC_RAM1_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+	{ "CPC_DC_RESTORE_RAM1_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+	/* CPF */
+	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+	{ "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+	/* GDS */
+	{ "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+	  SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+	{ "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+	{ "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE0_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE1_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE2_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE3_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+
+	/* SPI */
+	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+	{ "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+	{ "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+	{ "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) },
+	{ "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+	/* SQ */
+	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+	/* SQC */
+	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKA_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKA_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKA_HIT_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKA_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  DATA_BANKA_MISS_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKB_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKB_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKB_HIT_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKB_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  DATA_BANKB_MISS_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+
+	/* TA */
+	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) },
+	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+
+	/* TCA */
+	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+	/* TCC */
+	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+
+	/* TCI */
+	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+	/* TCP */
+	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 },
+	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+	/* TD */
+	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+	/* EA */
+	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+	{ "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+	{ "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+	{ "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 },
+	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) },
+	{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+	{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+	{ "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+	{ "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+	{ "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+	{ "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+	{ "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+	{ "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+	{ "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+	{ "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+	{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 },
+	{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+
+	/* RLC */
+	{ "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+	{ "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+	{ "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+	{ "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+	{ "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+	{ "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+};
+
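+/*
+ * The UTC-level memories named below are not covered by the
+ * gfx_v9_4_ras_fields table above: each instance is selected by writing
+ * its index to the matching *_ECC_INDEX/*_DSM_INDEX register, and its
+ * SEC/DED counts are then read back from the paired *_CNTL register
+ * (see gfx_v9_4_query_utc_edc_status() below).
+ */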
+static const char * const vml2_mems[] = {
+	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
+	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
+	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
+	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
+	"UTC_VML2_IFIFO_GROUP0",
+	"UTC_VML2_IFIFO_GROUP1",
+	"UTC_VML2_IFIFO_GROUP2",
+	"UTC_VML2_IFIFO_GROUP3",
+	"UTC_VML2_IFIFO_GROUP4",
+	"UTC_VML2_IFIFO_GROUP5",
+	"UTC_VML2_IFIFO_GROUP6",
+	"UTC_VML2_IFIFO_GROUP7",
+	"UTC_VML2_IFIFO_GROUP8",
+	"UTC_VML2_IFIFO_GROUP9",
+	"UTC_VML2_IFIFO_GROUP10",
+	"UTC_VML2_IFIFO_GROUP11",
+	"UTC_VML2_IFIFO_GROUP12",
+	"UTC_VML2_IFIFO_GROUP13",
+	"UTC_VML2_IFIFO_GROUP14",
+	"UTC_VML2_IFIFO_GROUP15",
+	"UTC_VML2_IFIFO_GROUP16",
+	"UTC_VML2_IFIFO_GROUP17",
+	"UTC_VML2_IFIFO_GROUP18",
+	"UTC_VML2_IFIFO_GROUP19",
+	"UTC_VML2_IFIFO_GROUP20",
+	"UTC_VML2_IFIFO_GROUP21",
+	"UTC_VML2_IFIFO_GROUP22",
+	"UTC_VML2_IFIFO_GROUP23",
+	"UTC_VML2_IFIFO_GROUP24",
+};
+
+static const char * const vml2_walker_mems[] = {
+	"UTC_VML2_CACHE_PDE0_MEM0",
+	"UTC_VML2_CACHE_PDE0_MEM1",
+	"UTC_VML2_CACHE_PDE1_MEM0",
+	"UTC_VML2_CACHE_PDE1_MEM1",
+	"UTC_VML2_CACHE_PDE2_MEM0",
+	"UTC_VML2_CACHE_PDE2_MEM1",
+	"UTC_VML2_RDIF_ARADDRS",
+	"UTC_VML2_RDIF_LOG_FIFO",
+	"UTC_VML2_QUEUE_REQ",
+	"UTC_VML2_QUEUE_RET",
+};
+
+static const char * const utcl2_router_mems[] = {
+	"UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0",
+	"UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1",
+	"UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2",
+	"UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3",
+	"UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4",
+	"UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5",
+	"UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6",
+	"UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7",
+	"UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8",
+	"UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9",
+	"UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10",
+	"UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11",
+	"UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12",
+	"UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13",
+	"UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14",
+	"UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15",
+	"UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16",
+	"UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17",
+	"UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18",
+	"UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19",
+	"UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20",
+	"UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21",
+	"UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22",
+	"UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23",
+	"UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24",
+};
+
+static const char * const atc_l2_cache_2m_mems[] = {
+	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char * const atc_l2_cache_4k_mems[] = {
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
+					 struct ras_err_data *err_data)
+{
+	uint32_t i, data;
+	uint32_t sec_count, ded_count;
+
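+	/*
+	 * Reset the index selectors and zero the counter control registers
+	 * so the per-instance scan below starts from a known-clean state.
+	 */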
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+
+		sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 vml2_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 vml2_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+
+		sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+					  SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 vml2_walker_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+					  DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 vml2_walker_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+		WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+
+		sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 utcl2_router_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 utcl2_router_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+
+		sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+					  SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 atc_l2_cache_2m_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+					  DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 atc_l2_cache_2m_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+
+		sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+					  SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 atc_l2_cache_4k_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+					  DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 atc_l2_cache_4k_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+
+	return 0;
+}
+
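+/*
+ * Match @reg against every row of gfx_v9_4_ras_fields and fold the SEC/DED
+ * bitfields extracted from @value into the running totals.  A register can
+ * contribute several rows, so the walk deliberately never exits early.
+ */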
+static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+				    uint32_t se_id, uint32_t inst_id,
+				    uint32_t value, uint32_t *sec_count,
+				    uint32_t *ded_count)
+{
+	uint32_t i;
+	uint32_t sec_cnt, ded_cnt;
+
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) {
+		if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset ||
+		    gfx_v9_4_ras_fields[i].seg != reg->seg ||
+		    gfx_v9_4_ras_fields[i].inst != reg->inst)
+			continue;
+
+		sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
+			  gfx_v9_4_ras_fields[i].sec_count_shift;
+		if (sec_cnt) {
+			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+				 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+				 sec_cnt);
+			*sec_count += sec_cnt;
+		}
+
+		ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
+			  gfx_v9_4_ras_fields[i].ded_count_shift;
+		if (ded_cnt) {
+			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+				 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+				 ded_cnt);
+			*ded_count += ded_cnt;
+		}
+	}
+
+	return 0;
+}
+
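+/*
+ * Sweep every EDC counter register across all shader engines and instances.
+ * GRBM index selection is global state, so the whole sweep runs under
+ * grbm_idx_mutex and broadcast mode is restored before the lock is dropped.
+ */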
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+				   void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0, ded_count = 0;
+	uint32_t i, j, k;
+	uint32_t reg_value;
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return -EINVAL;
+
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+			     k++) {
+				gfx_v9_4_select_se_sh(adev, j, 0, k);
+				reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+					gfx_v9_4_edc_counter_regs[i]));
+				if (reg_value)
+					gfx_v9_4_ras_error_count(
+						&gfx_v9_4_edc_counter_regs[i],
+						j, k, reg_value, &sec_count,
+						&ded_count);
+			}
+		}
+	}
+
+	err_data->ce_count += sec_count;
+	err_data->ue_count += ded_count;
+
+	gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	gfx_v9_4_query_utc_edc_status(adev, err_data);
+
+	return 0;
+}
+
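+/*
+ * The EDC counters are cleared by the act of reading them: selecting each
+ * instance and reading back the counter register discards any stale counts.
+ */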
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+			     k++) {
+				gfx_v9_4_select_se_sh(adev, j, 0, k);
+				RREG32(SOC15_REG_ENTRY_OFFSET(
+					gfx_v9_4_edc_counter_regs[i]));
+			}
+		}
+	}
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+		WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+	}
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+}
+
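+/*
+ * Error injection is delegated to the RAS TA running on the PSP: the driver
+ * only packs the trigger parameters and holds grbm_idx_mutex across the PSP
+ * call so the injection cannot race a concurrent counter sweep.
+ */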
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+	int ret;
+	struct ta_ras_trigger_error_input block_info = { 0 };
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return -EINVAL;
+
+	block_info.block_id = info->head.block;
+	block_info.sub_block_index = info->head.sub_block_index;
+	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+	block_info.address = info->address;
+	block_info.value = info->value;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	ret = psp_ras_trigger_error(&adev->psp, &block_info);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
new file mode 100644
index 000000000000..2e3f6f755ad4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_H__
+#define __GFX_V9_4_H__
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+				   void *ras_error_status);
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+			      void *inject_if);
+
+#endif /* __GFX_V9_4_H__ */
commit 19cf0dd4b9d2771015fc9d74ec1b0b9203cf8c5a
Author: Dennis Li <Dennis.Li at amd.com>
Date:   Thu Jan 16 12:30:33 2020 +0800

    drm/amdgpu: add EDC counter registers of gc for Arcturus
    
    add reg headers to gc includes
    
    v2: remove unused registers and fields in this patch set
    
    Signed-off-by: Dennis Li <Dennis.Li at amd.com>
    Reviewed-by: Guchun Chen <guchun.chen at amd.com>
    Reviewed-by: Tao Zhou <tao.zhou1 at amd.com>
    Acked-by: Alex Deucher <alexander.deucher at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

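The offset and sh_mask pairs added here are consumed through the SOC15
accessor macros; a minimal sketch, assuming the stock RREG32_SOC15() and
REG_GET_FIELD() helpers from the amdgpu tree, with register and field names
taken from this series:

    uint32_t cnt, sec;

    /* Read one EDC counter and extract a SEC field from it. */
    cnt = RREG32_SOC15(GC, 0, mmTCC_EDC_CNT);
    sec = REG_GET_FIELD(cnt, TCC_EDC_CNT, SRC_FIFO_SEC_COUNT);
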
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
new file mode 100644
index 000000000000..f41556abfbbc
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_OFFSET_HEADER
+#define _gc_9_4_1_OFFSET_HEADER
+
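+/*
+ * Each register is described by a dword offset (mmFOO) plus a segment
+ * selector (mmFOO_BASE_IDX); the SOC15 accessors add the per-ASIC segment
+ * base from adev->reg_offset[] before issuing the MMIO access.
+ */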
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define mmGRBM_CNTL                                                                                    0x0000
+#define mmGRBM_CNTL_BASE_IDX                                                                           0
+#define mmGRBM_SKEW_CNTL                                                                               0x0001
+#define mmGRBM_SKEW_CNTL_BASE_IDX                                                                      0
+#define mmGRBM_STATUS2                                                                                 0x0002
+#define mmGRBM_STATUS2_BASE_IDX                                                                        0
+#define mmGRBM_PWR_CNTL                                                                                0x0003
+#define mmGRBM_PWR_CNTL_BASE_IDX                                                                       0
+#define mmGRBM_STATUS                                                                                  0x0004
+#define mmGRBM_STATUS_BASE_IDX                                                                         0
+#define mmGRBM_STATUS_SE0                                                                              0x0005
+#define mmGRBM_STATUS_SE0_BASE_IDX                                                                     0
+#define mmGRBM_STATUS_SE1                                                                              0x0006
+#define mmGRBM_STATUS_SE1_BASE_IDX                                                                     0
+#define mmGRBM_SOFT_RESET                                                                              0x0008
+#define mmGRBM_SOFT_RESET_BASE_IDX                                                                     0
+#define mmGRBM_GFX_CLKEN_CNTL                                                                          0x000c
+#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX                                                                 0
+#define mmGRBM_WAIT_IDLE_CLOCKS                                                                        0x000d
+#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX                                                               0
+#define mmGRBM_STATUS_SE2                                                                              0x000e
+#define mmGRBM_STATUS_SE2_BASE_IDX                                                                     0
+#define mmGRBM_STATUS_SE3                                                                              0x000f
+#define mmGRBM_STATUS_SE3_BASE_IDX                                                                     0
+#define mmGRBM_READ_ERROR                                                                              0x0016
+#define mmGRBM_READ_ERROR_BASE_IDX                                                                     0
+#define mmGRBM_READ_ERROR2                                                                             0x0017
+#define mmGRBM_READ_ERROR2_BASE_IDX                                                                    0
+#define mmGRBM_INT_CNTL                                                                                0x0018
+#define mmGRBM_INT_CNTL_BASE_IDX                                                                       0
+#define mmGRBM_TRAP_OP                                                                                 0x0019
+#define mmGRBM_TRAP_OP_BASE_IDX                                                                        0
+#define mmGRBM_TRAP_ADDR                                                                               0x001a
+#define mmGRBM_TRAP_ADDR_BASE_IDX                                                                      0
+#define mmGRBM_TRAP_ADDR_MSK                                                                           0x001b
+#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX                                                                  0
+#define mmGRBM_TRAP_WD                                                                                 0x001c
+#define mmGRBM_TRAP_WD_BASE_IDX                                                                        0
+#define mmGRBM_TRAP_WD_MSK                                                                             0x001d
+#define mmGRBM_TRAP_WD_MSK_BASE_IDX                                                                    0
+#define mmGRBM_DSM_BYPASS                                                                              0x001e
+#define mmGRBM_DSM_BYPASS_BASE_IDX                                                                     0
+#define mmGRBM_WRITE_ERROR                                                                             0x001f
+#define mmGRBM_WRITE_ERROR_BASE_IDX                                                                    0
+#define mmGRBM_IOV_ERROR                                                                               0x0020
+#define mmGRBM_IOV_ERROR_BASE_IDX                                                                      0
+#define mmGRBM_CHIP_REVISION                                                                           0x0021
+#define mmGRBM_CHIP_REVISION_BASE_IDX                                                                  0
+#define mmGRBM_GFX_CNTL                                                                                0x0022
+#define mmGRBM_GFX_CNTL_BASE_IDX                                                                       0
+#define mmGRBM_RSMU_CFG                                                                                0x0023
+#define mmGRBM_RSMU_CFG_BASE_IDX                                                                       0
+#define mmGRBM_IH_CREDIT                                                                               0x0024
+#define mmGRBM_IH_CREDIT_BASE_IDX                                                                      0
+#define mmGRBM_PWR_CNTL2                                                                               0x0025
+#define mmGRBM_PWR_CNTL2_BASE_IDX                                                                      0
+#define mmGRBM_UTCL2_INVAL_RANGE_START                                                                 0x0026
+#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX                                                        0
+#define mmGRBM_UTCL2_INVAL_RANGE_END                                                                   0x0027
+#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX                                                          0
+#define mmGRBM_RSMU_READ_ERROR                                                                         0x0028
+#define mmGRBM_RSMU_READ_ERROR_BASE_IDX                                                                0
+#define mmGRBM_CHICKEN_BITS                                                                            0x0029
+#define mmGRBM_CHICKEN_BITS_BASE_IDX                                                                   0
+#define mmGRBM_FENCE_RANGE0                                                                            0x002a
+#define mmGRBM_FENCE_RANGE0_BASE_IDX                                                                   0
+#define mmGRBM_FENCE_RANGE1                                                                            0x002b
+#define mmGRBM_FENCE_RANGE1_BASE_IDX                                                                   0
+#define mmGRBM_NOWHERE                                                                                 0x003f
+#define mmGRBM_NOWHERE_BASE_IDX                                                                        0
+#define mmGRBM_SCRATCH_REG0                                                                            0x0040
+#define mmGRBM_SCRATCH_REG0_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG1                                                                            0x0041
+#define mmGRBM_SCRATCH_REG1_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG2                                                                            0x0042
+#define mmGRBM_SCRATCH_REG2_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG3                                                                            0x0043
+#define mmGRBM_SCRATCH_REG3_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG4                                                                            0x0044
+#define mmGRBM_SCRATCH_REG4_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG5                                                                            0x0045
+#define mmGRBM_SCRATCH_REG5_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG6                                                                            0x0046
+#define mmGRBM_SCRATCH_REG6_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG7                                                                            0x0047
+#define mmGRBM_SCRATCH_REG7_BASE_IDX                                                                   0
+
+// addressBlock: gc_cppdec2
+// base address: 0xc600
+#define mmCPF_EDC_TAG_CNT                                                                              0x1189
+#define mmCPF_EDC_TAG_CNT_BASE_IDX                                                                     0
+#define mmCPF_EDC_ROQ_CNT                                                                              0x118a
+#define mmCPF_EDC_ROQ_CNT_BASE_IDX                                                                     0
+#define mmCPG_EDC_TAG_CNT                                                                              0x118b
+#define mmCPG_EDC_TAG_CNT_BASE_IDX                                                                     0
+#define mmCPG_EDC_DMA_CNT                                                                              0x118d
+#define mmCPG_EDC_DMA_CNT_BASE_IDX                                                                     0
+#define mmCPC_EDC_SCRATCH_CNT                                                                          0x118e
+#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX                                                                 0
+#define mmCPC_EDC_UCODE_CNT                                                                            0x118f
+#define mmCPC_EDC_UCODE_CNT_BASE_IDX                                                                   0
+#define mmDC_EDC_STATE_CNT                                                                             0x1191
+#define mmDC_EDC_STATE_CNT_BASE_IDX                                                                    0
+#define mmDC_EDC_CSINVOC_CNT                                                                           0x1192
+#define mmDC_EDC_CSINVOC_CNT_BASE_IDX                                                                  0
+#define mmDC_EDC_RESTORE_CNT                                                                           0x1193
+#define mmDC_EDC_RESTORE_CNT_BASE_IDX                                                                  0
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define mmGDS_EDC_CNT                                                                                  0x05c5
+#define mmGDS_EDC_CNT_BASE_IDX                                                                         0
+#define mmGDS_EDC_GRBM_CNT                                                                             0x05c6
+#define mmGDS_EDC_GRBM_CNT_BASE_IDX                                                                    0
+#define mmGDS_EDC_OA_DED                                                                               0x05c7
+#define mmGDS_EDC_OA_DED_BASE_IDX                                                                      0
+#define mmGDS_EDC_OA_PHY_CNT                                                                           0x05cb
+#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX                                                                  0
+#define mmGDS_EDC_OA_PIPE_CNT                                                                          0x05cc
+#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX                                                                 0
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define mmSPI_EDC_CNT                                                                                  0x0445
+#define mmSPI_EDC_CNT_BASE_IDX                                                                         0
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define mmSQC_EDC_CNT2                                                                                 0x032c
+#define mmSQC_EDC_CNT2_BASE_IDX                                                                        0
+#define mmSQC_EDC_CNT3                                                                                 0x032d
+#define mmSQC_EDC_CNT3_BASE_IDX                                                                        0
+#define mmSQC_EDC_PARITY_CNT3                                                                          0x032e
+#define mmSQC_EDC_PARITY_CNT3_BASE_IDX                                                                 0
+#define mmSQC_EDC_CNT                                                                                  0x03a2
+#define mmSQC_EDC_CNT_BASE_IDX                                                                         0
+#define mmSQ_EDC_SEC_CNT                                                                               0x03a3
+#define mmSQ_EDC_SEC_CNT_BASE_IDX                                                                      0
+#define mmSQ_EDC_DED_CNT                                                                               0x03a4
+#define mmSQ_EDC_DED_CNT_BASE_IDX                                                                      0
+#define mmSQ_EDC_INFO                                                                                  0x03a5
+#define mmSQ_EDC_INFO_BASE_IDX                                                                         0
+#define mmSQ_EDC_CNT                                                                                   0x03a6
+#define mmSQ_EDC_CNT_BASE_IDX                                                                          0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTD_EDC_CNT                                                                                   0x052e
+#define mmTD_EDC_CNT_BASE_IDX                                                                          0
+#define mmTA_EDC_CNT                                                                                   0x0586
+#define mmTA_EDC_CNT_BASE_IDX                                                                          0
+
+// addressBlock: gc_tcdec
+// base address: 0xac00
+#define mmTCP_EDC_CNT                                                                                  0x0b17
+#define mmTCP_EDC_CNT_BASE_IDX                                                                         0
+#define mmTCP_EDC_CNT_NEW                                                                              0x0b18
+#define mmTCP_EDC_CNT_NEW_BASE_IDX                                                                     0
+#define mmTCP_ATC_EDC_GATCL1_CNT                                                                       0x12b1
+#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX                                                              0
+#define mmTCI_EDC_CNT                                                                                  0x0b60
+#define mmTCI_EDC_CNT_BASE_IDX                                                                         0
+#define mmTCC_EDC_CNT                                                                                  0x0b82
+#define mmTCC_EDC_CNT_BASE_IDX                                                                         0
+#define mmTCC_EDC_CNT2                                                                                 0x0b83
+#define mmTCC_EDC_CNT2_BASE_IDX                                                                        0
+#define mmTCA_EDC_CNT                                                                                  0x0bc5
+#define mmTCA_EDC_CNT_BASE_IDX                                                                         0
+
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_EDC_CNT                                                                                 0x0706
+#define mmGCEA_EDC_CNT_BASE_IDX                                                                        0
+#define mmGCEA_EDC_CNT2                                                                                0x0707
+#define mmGCEA_EDC_CNT2_BASE_IDX                                                                       0
+#define mmGCEA_EDC_CNT3                                                                                0x071b
+#define mmGCEA_EDC_CNT3_BASE_IDX                                                                       0
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define mmSCRATCH_REG0                                                                                 0x2040
+#define mmSCRATCH_REG0_BASE_IDX                                                                        1
+#define mmSCRATCH_REG1                                                                                 0x2041
+#define mmSCRATCH_REG1_BASE_IDX                                                                        1
+#define mmSCRATCH_REG2                                                                                 0x2042
+#define mmSCRATCH_REG2_BASE_IDX                                                                        1
+#define mmSCRATCH_REG3                                                                                 0x2043
+#define mmSCRATCH_REG3_BASE_IDX                                                                        1
+#define mmSCRATCH_REG4                                                                                 0x2044
+#define mmSCRATCH_REG4_BASE_IDX                                                                        1
+#define mmSCRATCH_REG5                                                                                 0x2045
+#define mmSCRATCH_REG5_BASE_IDX                                                                        1
+#define mmSCRATCH_REG6                                                                                 0x2046
+#define mmSCRATCH_REG6_BASE_IDX                                                                        1
+#define mmSCRATCH_REG7                                                                                 0x2047
+#define mmSCRATCH_REG7_BASE_IDX                                                                        1
+#define mmGRBM_GFX_INDEX                                                                               0x2200
+#define mmGRBM_GFX_INDEX_BASE_IDX                                                                      1
+
+// addressBlock: gc_utcl2_atcl2dec
+// base address: 0xa000
+#define mmATC_L2_CACHE_4K_DSM_INDEX                                                                    0x080e
+#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX                                                           0
+#define mmATC_L2_CACHE_2M_DSM_INDEX                                                                    0x080f
+#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX                                                           0
+#define mmATC_L2_CACHE_4K_DSM_CNTL                                                                     0x0810
+#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX                                                            0
+#define mmATC_L2_CACHE_2M_DSM_CNTL                                                                     0x0811
+#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX                                                            0
+
+// addressBlock: gc_utcl2_vml2pfdec
+// base address: 0xa100
+#define mmVML2_MEM_ECC_INDEX                                                                           0x0860
+#define mmVML2_MEM_ECC_INDEX_BASE_IDX                                                                  0
+#define mmVML2_WALKER_MEM_ECC_INDEX                                                                    0x0861
+#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX                                                           0
+#define mmUTCL2_MEM_ECC_INDEX                                                                          0x0862
+#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX                                                                 0
+
+#define mmVML2_MEM_ECC_CNTL                                                                            0x0863
+#define mmVML2_MEM_ECC_CNTL_BASE_IDX                                                                   0
+#define mmVML2_WALKER_MEM_ECC_CNTL                                                                     0x0864
+#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX                                                            0
+#define mmUTCL2_MEM_ECC_CNTL                                                                           0x0865
+#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX                                                                  0
+
+// addressBlock: gc_rlcpdec
+// base address: 0x3b000
+#define mmRLC_EDC_CNT                                                                                  0x4d40
+#define mmRLC_EDC_CNT_BASE_IDX                                                                         1
+#define mmRLC_EDC_CNT2                                                                                 0x4d41
+#define mmRLC_EDC_CNT2_BASE_IDX                                                                        1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
new file mode 100644
index 000000000000..f26246a600c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_SH_MASK_HEADER
+#define _gc_9_4_1_SH_MASK_HEADER
+
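+/*
+ * Every field is described by a __SHIFT/_MASK pair which REG_GET_FIELD()
+ * and REG_SET_FIELD() consume by token pasting, e.g.
+ * REG_GET_FIELD(v, CPF_EDC_TAG_CNT, SEC_COUNT) ==
+ *   ((v & CPF_EDC_TAG_CNT__SEC_COUNT_MASK) >> CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT)
+ */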
+// addressBlock: gc_cppdec2
+//CPF_EDC_TAG_CNT
+#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT                                                                     0x0
+#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT                                                                     0x2
+#define CPF_EDC_TAG_CNT__DED_COUNT_MASK                                                                       0x00000003L
+#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK                                                                       0x0000000CL
+//CPF_EDC_ROQ_CNT
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT                                                                 0x0
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT                                                                 0x2
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT                                                                 0x4
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT                                                                 0x6
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK                                                                   0x00000003L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK                                                                   0x0000000CL
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK                                                                   0x00000030L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK                                                                   0x000000C0L
+//CPG_EDC_TAG_CNT
+#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT                                                                     0x0
+#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT                                                                     0x2
+#define CPG_EDC_TAG_CNT__DED_COUNT_MASK                                                                       0x00000003L
+#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK                                                                       0x0000000CL
+//CPG_EDC_DMA_CNT
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT                                                                 0x0
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT                                                                 0x2
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT                                                                 0x4
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT                                                                 0x6
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK                                                                   0x00000003L
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK                                                                   0x0000000CL
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK                                                                   0x00000030L
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK                                                                   0x000000C0L
+//CPC_EDC_SCRATCH_CNT
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT                                                                 0x0
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT                                                                 0x2
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK                                                                   0x00000003L
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK                                                                   0x0000000CL
+//CPC_EDC_UCODE_CNT
+#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT                                                                   0x0
+#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT                                                                   0x2
+#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK                                                                     0x00000003L
+#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK                                                                     0x0000000CL
+//DC_EDC_STATE_CNT
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT                                                                0x0
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT                                                                0x2
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK                                                                  0x00000003L
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK                                                                  0x0000000CL
+//DC_EDC_CSINVOC_CNT
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT                                                              0x0
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT                                                              0x2
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT                                                             0x4
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT                                                             0x6
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK                                                                0x00000003L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK                                                                0x0000000CL
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK                                                               0x00000030L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK                                                               0x000000C0L
+//DC_EDC_RESTORE_CNT
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT                                                              0x0
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT                                                              0x2
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT                                                             0x4
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT                                                             0x6
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK                                                                0x00000003L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK                                                                0x0000000CL
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK                                                               0x00000030L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK                                                               0x000000C0L
+
+// addressBlock: gc_gdsdec
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT                                                                       0x0
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT                                                                       0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT                                                                            0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK                                                                         0x00000003L
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK                                                                         0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK                                                                              0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT                                                                          0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT                                                                          0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT                                                                       0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK                                                                            0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK                                                                            0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK                                                                         0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT                                                            0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT                                                            0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT                                                                     0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT                                                             0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT                                                                  0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT                                                                  0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT                                                                  0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT                                                                  0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT                                                                  0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT                                                                  0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT                                                                  0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT                                                                  0xb
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT                                                                        0xc
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK                                                              0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK                                                              0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK                                                                       0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK                                                               0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK                                                                    0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK                                                                    0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK                                                                    0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK                                                                    0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK                                                                    0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK                                                                    0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK                                                                    0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK                                                                    0x00000800L
+#define GDS_EDC_OA_DED__UNUSED1_MASK                                                                          0xFFFFF000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT                                                        0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT                                                        0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT                                                        0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT                                                        0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT                                                       0x8
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT                                                       0xa
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT                                                                    0xc
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK                                                          0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK                                                          0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK                                                          0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK                                                          0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK                                                         0x00000300L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK                                                         0x00000C00L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK                                                                      0xFFFFF000L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT                                                    0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT                                                    0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT                                                    0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT                                                    0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT                                                    0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT                                                    0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT                                                    0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT                                                    0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT                                                                    0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK                                                      0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK                                                      0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK                                                      0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK                                                      0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK                                                      0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK                                                      0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK                                                      0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK                                                      0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK                                                                      0xFFFF0000L
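+
+/* Editor's note, illustrative sketch: in the amdgpu driver such pairs are
+ * normally consumed through the REG_GET_FIELD() helper from amdgpu.h, which
+ * pastes the register and field names onto the _MASK/__SHIFT macros. With
+ * 'data' a hypothetical raw read of GDS_EDC_CNT:
+ *
+ *    sec_count = REG_GET_FIELD(data, GDS_EDC_CNT, GDS_MEM_SEC);
+ *    ded_count = REG_GET_FIELD(data, GDS_EDC_CNT, GDS_MEM_DED);
+ */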
+
+// addressBlock: gc_shsdec
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT                                                              0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT                                                              0x2
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT                                                          0x4
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT                                                          0x6
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT                                                         0x8
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT                                                         0xa
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT                                                         0xc
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT                                                         0xe
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT                                                            0x10
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT                                                            0x12
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK                                                                0x00000003L
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK                                                                0x0000000CL
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK                                                            0x00000030L
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK                                                            0x000000C0L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK                                                           0x00000300L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK                                                           0x00000C00L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK                                                           0x00003000L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK                                                           0x0000C000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK                                                              0x00030000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK                                                              0x000C0000L
+
+// addressBlock: gc_sqdec
+//SQC_EDC_CNT2
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT                                                     0x0
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT                                                     0x2
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT                                                    0x4
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT                                                    0x6
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT                                                     0x8
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT                                                     0xa
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT                                                    0xc
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT                                                    0xe
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                       0x10
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT                                                       0x12
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK                                                       0x00000003L
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK                                                       0x0000000CL
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK                                                      0x00000030L
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK                                                      0x000000C0L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK                                                       0x00000300L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK                                                       0x00000C00L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK                                                      0x00003000L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK                                                      0x0000C000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK                                                         0x00030000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK                                                         0x000C0000L
+//SQC_EDC_CNT3
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT                                                     0x0
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT                                                     0x2
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT                                                    0x4
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT                                                    0x6
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT                                                     0x8
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT                                                     0xa
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT                                                    0xc
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT                                                    0xe
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK                                                       0x00000003L
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK                                                       0x0000000CL
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK                                                      0x00000030L
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK                                                      0x000000C0L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK                                                       0x00000300L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK                                                       0x00000C00L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK                                                      0x00003000L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK                                                      0x0000C000L
+//SQC_EDC_PARITY_CNT3
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT                                      0x0
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT                                      0x2
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT                                            0x4
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT                                            0x6
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT                                             0x8
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT                                             0xa
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT                                            0xc
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT                                            0xe
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT                                      0x10
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT                                      0x12
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT                                            0x14
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT                                            0x16
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT                                             0x18
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT                                             0x1a
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT                                            0x1c
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT                                            0x1e
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK                                        0x00000003L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK                                        0x0000000CL
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK                                              0x00000030L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK                                              0x000000C0L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK                                               0x00000300L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK                                               0x00000C00L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK                                              0x00003000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK                                              0x0000C000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK                                        0x00030000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK                                        0x000C0000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK                                              0x00300000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK                                              0x00C00000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK                                               0x03000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK                                               0x0C000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK                                              0x30000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK                                              0xC0000000L
+//SQC_EDC_CNT
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x0
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0x2
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0x4
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0x6
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x8
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0xa
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0xc
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0xe
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x10
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0x12
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0x14
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0x16
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x18
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0x1a
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0x1c
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0x1e
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x00000003L
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x0000000CL
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x00000030L
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK                                                      0x000000C0L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x00000300L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x00000C00L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x00003000L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK                                                      0x0000C000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x00030000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x000C0000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x00300000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK                                                      0x00C00000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x03000000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x0C000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x30000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK                                                      0xC0000000L
+//SQ_EDC_SEC_CNT
+#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT                                                                        0x0
+#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT                                                                       0x8
+#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT                                                                       0x10
+#define SQ_EDC_SEC_CNT__LDS_SEC_MASK                                                                          0x000000FFL
+#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK                                                                         0x0000FF00L
+#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK                                                                         0x00FF0000L
+//SQ_EDC_DED_CNT
+#define SQ_EDC_DED_CNT__LDS_DED__SHIFT                                                                        0x0
+#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT                                                                       0x8
+#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT                                                                       0x10
+#define SQ_EDC_DED_CNT__LDS_DED_MASK                                                                          0x000000FFL
+#define SQ_EDC_DED_CNT__SGPR_DED_MASK                                                                         0x0000FF00L
+#define SQ_EDC_DED_CNT__VGPR_DED_MASK                                                                         0x00FF0000L
+//SQ_EDC_INFO
+#define SQ_EDC_INFO__WAVE_ID__SHIFT                                                                           0x0
+#define SQ_EDC_INFO__SIMD_ID__SHIFT                                                                           0x4
+#define SQ_EDC_INFO__SOURCE__SHIFT                                                                            0x6
+#define SQ_EDC_INFO__VM_ID__SHIFT                                                                             0x9
+#define SQ_EDC_INFO__WAVE_ID_MASK                                                                             0x0000000FL
+#define SQ_EDC_INFO__SIMD_ID_MASK                                                                             0x00000030L
+#define SQ_EDC_INFO__SOURCE_MASK                                                                              0x000001C0L
+#define SQ_EDC_INFO__VM_ID_MASK                                                                               0x00001E00L
+//SQ_EDC_CNT
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT                                                                    0x0
+#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT                                                                    0x2
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT                                                                    0x4
+#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT                                                                    0x6
+#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT                                                                     0x8
+#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT                                                                     0xa
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT                                                                    0xc
+#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT                                                                    0xe
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT                                                                    0x10
+#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT                                                                    0x12
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT                                                                    0x14
+#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT                                                                    0x16
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT                                                                    0x18
+#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT                                                                    0x1a
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK                                                                      0x00000003L
+#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK                                                                      0x0000000CL
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK                                                                      0x00000030L
+#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK                                                                      0x000000C0L
+#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK                                                                       0x00000300L
+#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK                                                                       0x00000C00L
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK                                                                      0x00003000L
+#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK                                                                      0x0000C000L
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK                                                                      0x00030000L
+#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK                                                                      0x000C0000L
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK                                                                      0x00300000L
+#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK                                                                      0x00C00000L
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK                                                                      0x03000000L
+#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK                                                                      0x0C000000L
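+
+/* Editor's note, illustrative sketch: SQ_EDC_INFO above identifies the wave
+ * that hit an EDC event; unpacking it is the same shift/mask pattern, with
+ * 'info' a hypothetical raw read of the register:
+ *
+ *    wave_id = (info & SQ_EDC_INFO__WAVE_ID_MASK) >> SQ_EDC_INFO__WAVE_ID__SHIFT;
+ *    simd_id = (info & SQ_EDC_INFO__SIMD_ID_MASK) >> SQ_EDC_INFO__SIMD_ID__SHIFT;
+ *    source  = (info & SQ_EDC_INFO__SOURCE_MASK)  >> SQ_EDC_INFO__SOURCE__SHIFT;
+ *    vm_id   = (info & SQ_EDC_INFO__VM_ID_MASK)   >> SQ_EDC_INFO__VM_ID__SHIFT;
+ */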
+
+// addressBlock: gc_tpdec
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT                                                              0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT                                                              0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT                                                              0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT                                                              0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT                                                              0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT                                                              0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT                                                              0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT                                                              0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT                                                              0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT                                                              0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK                                                                0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK                                                                0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK                                                                0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK                                                                0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK                                                                0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK                                                                0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK                                                                0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK                                                                0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK                                                                0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK                                                                0x000C0000L
+
+// addressBlock: gc_tcdec
+//TCP_EDC_CNT
+#define TCP_EDC_CNT__SEC_COUNT__SHIFT                                                                         0x0
+#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT                                                                   0x8
+#define TCP_EDC_CNT__DED_COUNT__SHIFT                                                                         0x10
+#define TCP_EDC_CNT__SEC_COUNT_MASK                                                                           0x000000FFL
+#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK                                                                     0x0000FF00L
+#define TCP_EDC_CNT__DED_COUNT_MASK                                                                           0x00FF0000L
+//TCP_EDC_CNT_NEW
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT                                                           0x0
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT                                                           0x2
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT                                                           0x4
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT                                                           0x6
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT                                                            0x8
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT                                                            0xa
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT                                                             0xc
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT                                                             0xe
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT                                                              0x10
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT                                                        0x12
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT                                                        0x14
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT                                                        0x16
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT                                                        0x18
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK                                                             0x00000003L
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK                                                             0x0000000CL
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK                                                             0x00000030L
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK                                                             0x000000C0L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK                                                              0x00000300L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK                                                              0x00000C00L
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK                                                               0x00003000L
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK                                                               0x0000C000L
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK                                                                0x00030000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK                                                          0x000C0000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK                                                          0x00300000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK                                                          0x00C00000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK                                                          0x03000000L
+//TCP_ATC_EDC_GATCL1_CNT
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT                                                               0x0
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK                                                                 0x000000FFL
+//TCI_EDC_CNT
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT                                                               0x0
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT                                                               0x2
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK                                                                 0x00000003L
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK                                                                 0x0000000CL
+//TCA_EDC_CNT
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT                                                               0x0
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT                                                               0x2
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT                                                                0x4
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT                                                                0x6
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK                                                                 0x00000003L
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK                                                                 0x0000000CL
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK                                                                  0x00000030L
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK                                                                  0x000000C0L
+//TCC_EDC_CNT
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT                                                              0x0
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT                                                              0x2
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT                                                             0x4
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT                                                             0x6
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT                                                           0x8
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT                                                           0xa
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT                                                            0xc
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT                                                            0xe
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT                                                                0x10
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT                                                                0x12
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT                                                            0x14
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT                                                            0x16
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT                                                   0x18
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT                                                   0x1a
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK                                                                0x00000003L
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK                                                                0x0000000CL
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK                                                               0x00000030L
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK                                                               0x000000C0L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK                                                             0x00000300L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK                                                             0x00000C00L
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK                                                              0x00003000L
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK                                                              0x0000C000L
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK                                                                  0x00030000L
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK                                                                  0x000C0000L
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK                                                              0x00300000L
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK                                                              0x00C00000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK                                                     0x03000000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK                                                     0x0C000000L
+//TCC_EDC_CNT2
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT                                                   0x0
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT                                                   0x2
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT                                                         0x4
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT                                                         0x6
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT                                                       0x8
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT                                                       0xa
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT                                                         0xc
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT                                                         0xe
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT                                                        0x10
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT                                                        0x12
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT                                                             0x14
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT                                                             0x16
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT                                                           0x18
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT                                                           0x1a
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT                                                            0x1c
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT                                                            0x1e
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK                                                     0x00000003L
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK                                                     0x0000000CL
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK                                                           0x00000030L
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK                                                           0x000000C0L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK                                                         0x00000300L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK                                                         0x00000C00L
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK                                                           0x00003000L
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK                                                           0x0000C000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK                                                          0x00030000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK                                                          0x000C0000L
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK                                                               0x00300000L
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK                                                               0x00C00000L
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK                                                             0x03000000L
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK                                                             0x0C000000L
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK                                                              0x30000000L
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK                                                              0xC0000000L
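+
+/* Editor's note, illustrative sketch: RAS-style error harvesting would sum
+ * the 2-bit per-structure counters of a register into running correctable
+ * (SEC) and uncorrectable (DED) totals, e.g. for two of the TCC_EDC_CNT
+ * fields, with 'cnt' a hypothetical raw read:
+ *
+ *    ce += REG_GET_FIELD(cnt, TCC_EDC_CNT, CACHE_DATA_SEC_COUNT) +
+ *          REG_GET_FIELD(cnt, TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT);
+ *    ue += REG_GET_FIELD(cnt, TCC_EDC_CNT, CACHE_DATA_DED_COUNT) +
+ *          REG_GET_FIELD(cnt, TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT);
+ */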
+
+// addressBlock: gc_tpdec
+//TD_EDC_CNT
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT                                                               0x0
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT                                                               0x2
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT                                                               0x4
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT                                                               0x6
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT                                                                  0x8
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT                                                                  0xa
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK                                                                 0x00000003L
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK                                                                 0x0000000CL
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK                                                                 0x00000030L
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK                                                                 0x000000C0L
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK                                                                    0x00000300L
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK                                                                    0x00000C00L
+
+// addressBlock: gc_ea_gceadec2
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT                                                          0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT                                                          0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT                                                          0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT                                                          0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT                                                         0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT                                                         0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT                                                            0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT                                                            0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT                                                            0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT                                                            0x12
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT                                                         0x14
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT                                                         0x16
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT                                                            0x18
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT                                                            0x1a
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT                                                           0x1c
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT                                                              0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK                                                            0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK                                                            0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK                                                            0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK                                                            0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK                                                           0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK                                                           0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK                                                              0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK                                                              0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK                                                              0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK                                                              0x000C0000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK                                                           0x00300000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK                                                           0x00C00000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK                                                              0x03000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK                                                              0x0C000000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK                                                             0x30000000L
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK                                                                0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT                                                          0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT                                                          0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT                                                          0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT                                                          0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT                                                         0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                         0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                         0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                         0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                             0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                             0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                             0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                             0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                             0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                             0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                             0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                             0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                            0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                            0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                            0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK                                                            0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK                                                           0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                           0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                           0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                           0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                               0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                               0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                               0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                               0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                               0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                               0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                               0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                               0xC0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT                                                        0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT                                                        0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT                                                           0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT                                                           0x6
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT                                                          0x8
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT                                                         0xa
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT                                                         0xc
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT                                                             0xe
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT                                                             0x10
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT                                                             0x12
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT                                                             0x14
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT                                                             0x16
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT                                                             0x18
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT                                                             0x1a
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT                                                             0x1c
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT                                                             0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK                                                          0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK                                                          0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK                                                             0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK                                                             0x000000C0L
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK                                                            0x00000300L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK                                                           0x00000C00L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK                                                           0x00003000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK                                                               0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK                                                               0x00030000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK                                                               0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK                                                               0x00300000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK                                                               0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK                                                               0x03000000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK                                                               0x0C000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK                                                               0x30000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK                                                               0xC0000000L
+
+// addressBlock: gc_gfxudec
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT                                                                 0x0
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT                                                                       0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT                                                                       0x10
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT                                                            0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT                                                      0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT                                                            0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK                                                                   0x000000FFL
+#define GRBM_GFX_INDEX__SH_INDEX_MASK                                                                         0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK                                                                         0x00FF0000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK                                                              0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK                                                        0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK                                                              0x80000000L
+
+// addressBlock: gc_utcl2_atcl2dec
+//ATC_L2_CNTL
+//ATC_L2_CACHE_4K_DSM_INDEX
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT                                                               0x0
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK                                                                 0x000000FFL
+//ATC_L2_CACHE_2M_DSM_INDEX
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT                                                               0x0
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK                                                                 0x000000FFL
+//ATC_L2_CACHE_4K_DSM_CNTL
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT                                                            0xd
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT                                                            0xf
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK                                                              0x00006000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK                                                              0x00018000L
+//ATC_L2_CACHE_2M_DSM_CNTL
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT                                                            0xd
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT                                                            0xf
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK                                                              0x00006000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK                                                              0x00018000L
+
+// addressBlock: gc_utcl2_vml2pfdec
+//VML2_MEM_ECC_INDEX
+#define VML2_MEM_ECC_INDEX__INDEX__SHIFT                                                                      0x0
+#define VML2_MEM_ECC_INDEX__INDEX_MASK                                                                        0x000000FFL
+//VML2_WALKER_MEM_ECC_INDEX
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT                                                               0x0
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK                                                                 0x000000FFL
+//UTCL2_MEM_ECC_INDEX
+#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT                                                                     0x0
+#define UTCL2_MEM_ECC_INDEX__INDEX_MASK                                                                       0x000000FFL
+//VML2_MEM_ECC_CNTL
+#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT                                                                   0xc
+#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT                                                                   0xe
+#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK                                                                     0x00003000L
+#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK                                                                     0x0000C000L
+//VML2_WALKER_MEM_ECC_CNTL
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT                                                            0xc
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT                                                            0xe
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK                                                              0x00003000L
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK                                                              0x0000C000L
+//UTCL2_MEM_ECC_CNTL
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT                                                                  0xc
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT                                                                  0xe
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK                                                                    0x00003000L
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK                                                                    0x0000C000L
+
+// addressBlock: gc_rlcpdec
+//RLC_EDC_CNT
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT                                                          0x0
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT                                                          0x2
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT                                                        0x4
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT                                                        0x6
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT                                                          0x8
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT                                                          0xa
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT                                                        0xc
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT                                                        0xe
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT                                                           0x10
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT                                                           0x12
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT                                                     0x14
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT                                             0x16
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT                                                        0x18
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT                                                        0x1a
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT                                                        0x1c
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT                                                        0x1e
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK                                                            0x00000003L
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK                                                            0x0000000CL
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK                                                          0x00000030L
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK                                                          0x000000C0L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK                                                            0x00000300L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK                                                            0x00000C00L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK                                                          0x00003000L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK                                                          0x0000C000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK                                                             0x00030000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK                                                             0x000C0000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK                                                       0x00300000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK                                               0x00C00000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK                                                          0x03000000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK                                                          0x0C000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK                                                          0x30000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK                                                          0xC0000000L
+//RLC_EDC_CNT2
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x0
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x2
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x4
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x6
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x8
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT                                                0xa
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0xc
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT                                                0xe
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x10
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x12
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x14
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x16
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x18
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x1a
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x1c
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x1e
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00000003L
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK                                                  0x0000000CL
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00000030L
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK                                                  0x000000C0L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00000300L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK                                                  0x00000C00L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00003000L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK                                                  0x0000C000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00030000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK                                                  0x000C0000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00300000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK                                                  0x00C00000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x03000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK                                                  0x0C000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x30000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK                                                  0xC0000000L
+
+#endif
\ No newline at end of file
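
A minimal sketch of how the definitions above are consumed, assuming a raw
32-bit read of the counter register in "value": every SEC/DED count is a
2-bit field, extracted with its matching MASK/SHIFT pair.

	/* e.g. the DED count for DRAMRD page memory in GCEA_EDC_CNT3 */
	uint32_t ded = (value & GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK) >>
		       GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT;

The SOC15_REG_FIELD-based RAS tables in the commits below package up this
same extraction.
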
commit 504c5e72d781e48946c32bf427c665ed15d532d0
Author: Dennis Li <Dennis.Li at amd.com>
Date:   Thu Jan 16 11:25:08 2020 +0800

    drm/amdgpu: abstract EDC counter clear to a separate function
    
    1. Add an IP prefix to the IP-related code.
    2. Refactor the code that clears the EDC counters.
    
    Signed-off-by: Dennis Li <Dennis.Li at amd.com>
    Reviewed-by: Guchun Chen <guchun.chen at amd.com>
    Reviewed-by: Tao Zhou <tao.zhou1 at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
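
A minimal usage sketch of the factored-out helper, assuming an initialized
amdgpu_device (the EDC counters are clear-on-read, so a single call now
drains them all):

	/* after the GPR workaround IB completes, reset every EDC counter */
	gfx_v9_0_clear_ras_edc_counter(adev);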

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2ff8b9e312fe..583800ab5fe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -736,6 +736,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status);
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 				     void *inject_if);
 
@@ -4029,7 +4030,7 @@ static const struct soc15_reg_entry sgpr2_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
 };
 
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -4118,7 +4119,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
 	struct amdgpu_ib ib;
 	struct dma_fence *f = NULL;
-	int r, i, j, k;
+	int r, i;
 	unsigned total_size, vgpr_offset, sgpr_offset;
 	u64 gpu_addr;
 
@@ -4264,18 +4265,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		goto fail;
 	}
 
-	/* read back registers to clear the counters */
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
-		for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
-			for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
-				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
-				RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
-			}
-		}
-	}
-	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
-	mutex_unlock(&adev->grbm_idx_mutex);
+	gfx_v9_0_clear_ras_edc_counter(adev);
 
 fail:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -5546,7 +5536,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
 }
 
 
-static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = {
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
@@ -6119,7 +6109,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
 
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
 
@@ -6138,7 +6128,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 		}
 	}
 
-	for (i = 0; i < 7; i++) {
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
 
@@ -6159,7 +6149,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 		}
 	}
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
 
@@ -6171,7 +6161,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 		}
 	}
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
 
@@ -6198,36 +6188,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
 	uint32_t se_id, uint32_t inst_id, uint32_t value,
 	uint32_t *sec_count, uint32_t *ded_count)
 {
 	uint32_t i;
 	uint32_t sec_cnt, ded_cnt;
 
-	for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) {
-		if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset ||
-			gc_ras_fields_vg20[i].seg != reg->seg ||
-			gc_ras_fields_vg20[i].inst != reg->inst)
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
+		if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
+			gfx_v9_0_ras_fields[i].seg != reg->seg ||
+			gfx_v9_0_ras_fields[i].inst != reg->inst)
 			continue;
 
 		sec_cnt = (value &
-				gc_ras_fields_vg20[i].sec_count_mask) >>
-				gc_ras_fields_vg20[i].sec_count_shift;
+				gfx_v9_0_ras_fields[i].sec_count_mask) >>
+				gfx_v9_0_ras_fields[i].sec_count_shift;
 		if (sec_cnt) {
 			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
-				gc_ras_fields_vg20[i].name,
+				gfx_v9_0_ras_fields[i].name,
 				se_id, inst_id,
 				sec_cnt);
 			*sec_count += sec_cnt;
 		}
 
 		ded_cnt = (value &
-				gc_ras_fields_vg20[i].ded_count_mask) >>
-				gc_ras_fields_vg20[i].ded_count_shift;
+				gfx_v9_0_ras_fields[i].ded_count_mask) >>
+				gfx_v9_0_ras_fields[i].ded_count_shift;
 		if (ded_cnt) {
 			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
-				gc_ras_fields_vg20[i].name,
+				gfx_v9_0_ras_fields[i].name,
 				se_id, inst_id,
 				ded_cnt);
 			*ded_count += ded_cnt;
@@ -6237,6 +6227,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
 	return 0;
 }
 
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	/* read back registers to clear the counters */
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+			}
+		}
+	}
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+	}
+
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+}
+
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status)
 {
@@ -6253,14 +6295,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 
 	mutex_lock(&adev->grbm_idx_mutex);
 
-	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
-		for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
-			for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
 				gfx_v9_0_select_se_sh(adev, j, 0, k);
 				reg_value =
-					RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
 				if (reg_value)
-					__get_ras_error_count(&sec_ded_counter_registers[i],
+					gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
 							j, k, reg_value,
 							&sec_count, &ded_count);
 			}
commit 5e66403e4d709decacf2b53e55deb55d8f6e6982
Author: Dennis Li <Dennis.Li at amd.com>
Date:   Thu Jan 16 11:07:55 2020 +0800

    drm/amdgpu: refine the security check for RAS functions
    
    Avoid calling RAS-related functions when the RAS feature isn't
    supported by the hardware.  Check the supported features instead
    of checking the asic type.
    
    v2: reuse the amdgpu_ras_is_supported function instead of
    introducing a new flag for the hardware ras feature.
    
    Signed-off-by: Dennis Li <Dennis.Li at amd.com>
    Reviewed-by: Guchun Chen <guchun.chen at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Reviewed-by: Tao Zhou <tao.zhou1 at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
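
The guard is the same in both touched functions; a minimal sketch:

	/* bail out early unless the GFX block actually supports RAS */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return -EINVAL;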

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fbf4ea3e0cfb..2ff8b9e312fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5994,7 +5994,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 	int ret;
 	struct ta_ras_trigger_error_input block_info = { 0 };
 
-	if (adev->asic_type != CHIP_VEGA20)
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return -EINVAL;
 
 	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
@@ -6245,7 +6245,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 	uint32_t i, j, k;
 	uint32_t reg_value;
 
-	if (adev->asic_type != CHIP_VEGA20)
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return -EINVAL;
 
 	err_data->ue_count = 0;
commit 39aa0ef1638ad7722e8182331c61deae8da97105
Author: Dennis Li <Dennis.Li at amd.com>
Date:   Mon Jan 13 17:03:53 2020 +0800

    drm/amdgpu: enable RAS feature for more mmhub sub-blocks of Arcturus
    
    Compared with Vega20, the number of mmhub ranges is increased from 2 to 8.
    
    Signed-off-by: Dennis Li <Dennis.Li at amd.com>
    Reviewed-by: Guchun Chen <guchun.chen at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
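
For context, each entry in the table below pairs an EDC register with the
mask and shift of its SEC/DED fields.  A minimal sketch of how the counting
path matches a read-back register value against the table (mirroring
gfx_v9_0_ras_error_count earlier in this series; "reg", "value" and
"sec_count" are assumed from the caller):

	uint32_t i, sec_cnt;

	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
		if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
			continue;
		/* 2-bit SEC counter, extracted via the table's mask/shift */
		sec_cnt = (value & mmhub_v9_4_ras_fields[i].sec_count_mask) >>
			  mmhub_v9_4_ras_fields[i].sec_count_shift;
		*sec_count += sec_cnt;
	}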

diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 5c42387c9274..a32c9757711f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -663,6 +663,7 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 }
 
 static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
+	/* MMHUB Range 0 */
 	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
 	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
 	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
@@ -751,6 +752,24 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
 	0, 0,
 	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
 	},
+	{ "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 1 */
 	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
 	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
 	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
@@ -838,16 +857,686 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
 	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
 	0, 0,
 	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 2 */
+	{ "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 3 */
+	{ "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 4 */
+	{ "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 5 */
+	{ "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 6 */
+	{ "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 7 */
+	{ "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT),
 	}
 };
 
 static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
-   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
-   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
-   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
-   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
-   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
-   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
 };
 
 static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
commit f519cd13c23e8c884011295b205c1463babbf416
Author: Dennis Li <Dennis.Li at amd.com>
Date:   Mon Jan 13 16:15:38 2020 +0800

    drm/amdgpu: update mmhub 9.4.1 header files for Arcturus
    
    Add mask & shift definitions of MAM_D(0~3)MEM for all mmhub
    ranges.
    
    Signed-off-by: Dennis Li <Dennis.Li at amd.com>
    Acked-by: Alex Deucher <alexander.deucher at amd.com>
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
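
Since each MAM_D(0~3)MEM counter is a 2-bit field, every mask below is just
0x3 shifted into position; a minimal sanity check, assuming the definitions
in this hunk:

	/* 2-bit field: mask == 0x3 << shift (here for the D0MEM SED count) */
	BUILD_BUG_ON(MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK !=
		     (0x3UL << MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT));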

diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 40dfbf16bd34..111a71b434e2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -11185,6 +11185,14 @@
 #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -11193,6 +11201,14 @@
 #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA0_DSM_CNTL
 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -14197,6 +14213,14 @@
 #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -14205,6 +14229,14 @@
 #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA1_DSM_CNTL
 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -17209,6 +17241,14 @@
 #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -17217,6 +17257,14 @@
 #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA2_DSM_CNTL
 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -20221,6 +20269,14 @@
 #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -20229,6 +20285,14 @@
 #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA3_DSM_CNTL
 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -23233,6 +23297,14 @@
 #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -23241,6 +23313,14 @@
 #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA4_DSM_CNTL
 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -34952,6 +35032,14 @@
 #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -34960,6 +35048,14 @@
 #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA5_DSM_CNTL
 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -37964,6 +38060,14 @@
 #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -37972,6 +38076,14 @@
 #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA6_DSM_CNTL
 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -40976,6 +41088,14 @@
 #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -40984,6 +41104,14 @@
 #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA7_DSM_CNTL
 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
commit e3cd03603dbe0e972196f12db75b6917b4db049c
Author: chen gong <curry.gong at amd.com>
Date:   Tue Jan 14 14:28:48 2020 +0800

    drm/amdgpu: read gfx register using RREG32_KIQ macro
    
    Reading the CP_MEM_SLP_CNTL register with the RREG32_SOC15 macro will
    lead to a hang when the GPU is in the "gfxoff" state.
    Do a uniform substitution to RREG32_KIQ here.
    
    Signed-off-by: chen gong <curry.gong at amd.com>
    Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
    Acked-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
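
A sketch of the substituted read pattern, assuming the usual amdgpu headers
(RREG32_KIQ and SOC15_REG_OFFSET are the existing macros shown later in this
series; the wrapper function itself is illustrative, not part of the patch):

/* Illustrative only: the KIQ variant submits the read as a ring
 * packet, which is safe while the GFX block sits in gfxoff, whereas
 * RREG32_SOC15() touches MMIO directly and can hang.
 */
static u32 read_cp_mem_slp_cntl(struct amdgpu_device *adev)
{
	return RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
}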

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d700f1fa387c..fbf4ea3e0cfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4717,12 +4717,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
 		*flags = 0;
 
 	/* AMD_CG_SUPPORT_GFX_MGCG */
-	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
 
 	/* AMD_CG_SUPPORT_GFX_CGCG */
-	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
 
@@ -4731,18 +4731,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
 
 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
-	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
 
 	/* AMD_CG_SUPPORT_GFX_CP_LS */
-	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
 
 	if (adev->asic_type != CHIP_ARCTURUS) {
 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
-		data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
 
commit c68dbcd8f9b0aa01e0459a5841716cb6a2f447b9
Author: chen gong <curry.gong at amd.com>
Date:   Wed Jan 15 14:44:43 2020 +0800

    drm/amdgpu: add kiq version interface for RREG32/WREG32
    
    Reading some registers over MMIO will result in a hang when the GPU is
    in the "gfxoff" state. This problem can be solved by issuing the reads
    to the GPU as ring command packets instead.
    
    Signed-off-by: chen gong <curry.gong at amd.com>
    Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
    Acked-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
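
A standalone model of the flag dispatch this adds to amdgpu_mm_rreg() and
amdgpu_mm_wreg() (flag values match the amdgpu.h hunk below; the two stub
functions are hypothetical stand-ins for the KIQ and direct-MMIO paths):

#include <stdint.h>
#include <stdbool.h>

#define AMDGPU_REGS_NO_KIQ	(1 << 1)
#define AMDGPU_REGS_KIQ		(1 << 2)

static uint32_t kiq_rreg_stub(uint32_t reg)  { (void)reg; return 0; }
static uint32_t mmio_rreg_stub(uint32_t reg) { (void)reg; return 0; }

static uint32_t mm_rreg_model(uint32_t reg, uint32_t acc_flags,
			      bool sriov_runtime)
{
	/* an explicit KIQ request, or SR-IOV runtime without an
	 * explicit opt-out, takes the ring-packet path */
	if ((acc_flags & AMDGPU_REGS_KIQ) ||
	    (!(acc_flags & AMDGPU_REGS_NO_KIQ) && sriov_runtime))
		return kiq_rreg_stub(reg);

	return mmio_rreg_stub(reg);
}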

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b1bb10625cd9..da3bcff61b97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1009,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 
 #define AMDGPU_REGS_IDX       (1<<0)
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
+#define AMDGPU_REGS_KIQ       (1<<2)
 
 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
+#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
+#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index afdea8f594ae..64b1b2d2d19b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -216,7 +216,7 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
 	uint32_t ret;
 
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
+	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
 		return amdgpu_kiq_rreg(adev, reg);
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
@@ -294,7 +294,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		adev->last_mm_index = v;
 	}
 
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
+	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
 		return amdgpu_kiq_wreg(adev, reg, v);
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
commit d33a99c4b63643d3d7a85d4dfb2ac5afb01eebca
Author: chen gong <curry.gong at amd.com>
Date:   Wed Jan 15 14:32:00 2020 +0800

    drm/amdgpu: provide a generic function interface for reading/writing register by KIQ
    
    Move the amdgpu_virt_kiq_rreg/amdgpu_virt_kiq_wreg functions to
    amdgpu_gfx.c and rename them to amdgpu_kiq_rreg/amdgpu_kiq_wreg,
    making the interface generic and flexible.
    
    Signed-off-by: chen gong <curry.gong at amd.com>
    Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
    Reviewed-by: Huang Rui <ray.huang at amd.com>
    Acked-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
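
As a reading aid for the function bodies below, a standalone model of their
bounded fence-wait loop (the constants and stub functions are illustrative
stand-ins for MAX_KIQ_REG_TRY, MAX_KIQ_REG_BAILOUT_INTERVAL,
amdgpu_fence_wait_polling() and msleep(); the values are assumptions, not
taken from the tree):

#include <stdbool.h>

enum { KIQ_MAX_TRY = 80, KIQ_BAILOUT_MS = 5 };	/* stand-in values */

extern long fence_wait_stub(unsigned int seq);	/* models amdgpu_fence_wait_polling() */
extern void msleep_stub(unsigned int ms);	/* models msleep() */

static bool kiq_wait_model(unsigned int seq, bool in_reset_or_irq)
{
	long r = fence_wait_stub(seq);
	int cnt = 0;

	/* never sleep-retry during GPU reset or from IRQ context,
	 * since that could block gpu_recover() forever */
	if (r < 1 && in_reset_or_irq)
		return false;

	/* otherwise sleep and re-poll up to a bounded number of tries */
	while (r < 1 && cnt++ < KIQ_MAX_TRY) {
		msleep_stub(KIQ_BAILOUT_MS);
		r = fence_wait_stub(seq);
	}

	return r >= 1;
}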

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 53d882000101..afdea8f594ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -217,7 +217,7 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 	uint32_t ret;
 
 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
-		return amdgpu_virt_kiq_rreg(adev, reg);
+		return amdgpu_kiq_rreg(adev, reg);
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -295,7 +295,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 	}
 
 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
-		return amdgpu_virt_kiq_wreg(adev, reg, v);
+		return amdgpu_kiq_wreg(adev, reg, v);
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b88b8b82bb64..0f960b498792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 	spin_lock_init(&kiq->ring_lock);
 
-	r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
+	r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
 	if (r)
 		return r;
 
@@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
 {
-	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+	amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
 	amdgpu_ring_fini(ring);
 }
 
@@ -658,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 	return 0;
 }
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_rreg(ring, reg);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_read;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_read;
+
+	return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+	pr_err("failed to read reg:%x\n", reg);
+	return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_wreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_wreg(ring, reg, v);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_write;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_write;
+
+	return;
+
+failed_kiq_write:
+	pr_err("failed to write reg:%x\n", reg);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index af4bd279f42f..ca17ffb01301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -94,6 +94,7 @@ struct amdgpu_kiq {
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 	const struct kiq_pm4_funcs *pmf;
+	uint32_t			reg_val_offs;
 };
 
 /*
@@ -375,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 				  struct amdgpu_irq_src *source,
 				  struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 103033f96f13..adc813cde8e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 	adev->pg_flags = 0;
 }
 
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	BUG_ON(!ring->funcs->emit_rreg);
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_rreg(ring, reg);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for gpu reset case because this way may
-	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
-	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
-	 * never return if we keep waiting in virt_kiq_rreg, which cause
-	 * gpu_recover() hang there.
-	 *
-	 * also don't wait anymore for IRQ context
-	 * */
-	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
-		goto failed_kiq_read;
-
-	might_sleep();
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq_read;
-
-	return adev->wb.wb[adev->virt.reg_val_offs];
-
-failed_kiq_read:
-	pr_err("failed to read reg:%x\n", reg);
-	return ~0;
-}
-
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	BUG_ON(!ring->funcs->emit_wreg);
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_wreg(ring, reg, v);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for gpu reset case because this way may
-	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
-	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
-	 * never return if we keep waiting in virt_kiq_rreg, which cause
-	 * gpu_recover() hang there.
-	 *
-	 * also don't wait anymore for IRQ context
-	 * */
-	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
-		goto failed_kiq_write;
-
-	might_sleep();
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq_write;
-
-	return;
-
-failed_kiq_write:
-	pr_err("failed to write reg:%x\n", reg);
-}
-
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 					uint32_t reg0, uint32_t reg1,
 					uint32_t ref, uint32_t mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 4d1ac7612967..daaf909d009a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -287,8 +287,6 @@ static inline bool is_virtual_machine(void)
 
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 					uint32_t reg0, uint32_t rreg1,
 					uint32_t ref, uint32_t mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a0d4e79c4821..65885ba92a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4737,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -4745,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 46f0533ba43f..fa245973de12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6449,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
 static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -6457,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6c02fe62b497..d700f1fa387c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5213,6 +5213,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -5221,9 +5222,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
commit a6c44d2538c469f412c3fded0de2290494d762d7
Author: John Clements <john.clements at amd.com>
Date:   Fri Jan 17 12:18:00 2020 +0800

    drm/amdgpu: added support to get mGPU DRAM base
    
    Resolves an issue with RAS error injection in mGPU configurations.
    
    Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
    Signed-off-by: John Clements <john.clements at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
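
A hypothetical standalone version of the base-address decode introduced in
df_v3_6_get_dram_base_addr() below (mask from the DF 3.6 header hunk; the
0xc shift follows from that mask):

#include <stdint.h>

#define DramBaseAddr_MASK	0xFFFFF000UL
#define DramBaseAddr_SHIFT	0xc

/* the 20-bit DramBaseAddr field holds bits [47:28] of the DRAM base,
 * hence the final shift by 28 */
static uint64_t dram_base_from_reg(uint32_t base_addr_reg_val)
{
	uint64_t base = (base_addr_reg_val & DramBaseAddr_MASK) >>
			DramBaseAddr_SHIFT;

	return base << 28;
}

get_xgmi_relative_phy_addr() then adds this base for the matching DF
instance to the RAS injection address.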

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 61a26c15c8dd..057f6ea645d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,6 +52,9 @@ struct amdgpu_df_funcs {
 	uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
 	void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
 			 uint32_t ficadl_val, uint32_t ficadh_val);
+	uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
+				       uint32_t df_inst);
+	uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 766be7f18282..cef94e2169fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -742,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 	return 0;
 }
 
+uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+{
+	uint32_t df_inst_id;
+
+	if ((!adev->df.funcs)                 ||
+	    (!adev->df.funcs->get_df_inst_id) ||
+	    (!adev->df.funcs->get_dram_base_addr))
+		return addr;
+
+	df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+
+	return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+}
+
 /* wrapper of psp_ras_trigger_error */
 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 		struct ras_inject_if *info)
@@ -759,6 +773,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 	if (!obj)
 		return -EINVAL;
 
+	/* Calculate XGMI relative offset */
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		block_info.address = get_xgmi_relative_phy_addr(adev,
+								block_info.address);
+	}
+
 	switch (info->head.block) {
 	case AMDGPU_RAS_BLOCK__GFX:
 		if (adev->gfx.funcs->ras_error_inject)
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index f51326598a8c..5a1bd8ed1a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -27,6 +27,9 @@
 #include "df/df_3_6_offset.h"
 #include "df/df_3_6_sh_mask.h"
 
+#define DF_3_6_SMN_REG_INST_DIST        0x8
+#define DF_3_6_INST_CNT                 8
+
 static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
 				       16, 32, 0, 0, 0, 2, 4, 8};
 
@@ -683,6 +686,58 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 	}
 }
 
+static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
+					   uint32_t df_inst)
+{
+	uint32_t base_addr_reg_val 	= 0;
+	uint64_t base_addr	 	= 0;
+
+	base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
+					df_inst * DF_3_6_SMN_REG_INST_DIST);
+
+	if (REG_GET_FIELD(base_addr_reg_val,
+			  DF_CS_UMC_AON0_DramBaseAddress0,
+			  AddrRngVal) == 0) {
+		DRM_WARN("address range not valid");
+		return 0;
+	}
+
+	base_addr = REG_GET_FIELD(base_addr_reg_val,
+				  DF_CS_UMC_AON0_DramBaseAddress0,
+				  DramBaseAddr);
+
+	return base_addr << 28;
+}
+
+static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
+{
+	uint32_t xgmi_node_id	= 0;
+	uint32_t df_inst_id 	= 0;
+
+	/* Walk through DF dst nodes to find current XGMI node */
+	for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
+
+		xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
+					   df_inst_id * DF_3_6_SMN_REG_INST_DIST);
+		xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
+					     DF_CS_UMC_AON0_DramLimitAddress0,
+					     DstFabricID);
+
+		/* TODO: establish reason dest fabric id is offset by 7 */
+		xgmi_node_id = xgmi_node_id >> 7;
+
+		if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
+			break;
+	}
+
+	if (df_inst_id == DF_3_6_INST_CNT) {
+		DRM_WARN("cant match df dst id with gpu node");
+		return 0;
+	}
+
+	return df_inst_id;
+}
+
 const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.sw_init = df_v3_6_sw_init,
 	.sw_fini = df_v3_6_sw_fini,
@@ -696,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.pmc_stop = df_v3_6_pmc_stop,
 	.pmc_get_count = df_v3_6_pmc_get_count,
 	.get_fica = df_v3_6_get_fica,
-	.set_fica = df_v3_6_set_fica
+	.set_fica = df_v3_6_set_fica,
+	.get_dram_base_addr = df_v3_6_get_dram_base_addr,
+	.get_df_inst_id = df_v3_6_get_df_inst_id
 };
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index 87c84691b5be..bb2c9c7a18df 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -71,4 +71,7 @@
 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3		0x1d098UL
 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3		0x1d09cUL
 
+#define smnDF_CS_UMC_AON0_DramBaseAddress0 	0x1c110UL
+#define smnDF_CS_UMC_AON0_DramLimitAddress0 	0x1c114UL
+
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index 65e9f756e86e..7afa87c7ff54 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -53,4 +53,12 @@
 #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK						0x00000E00L
 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK						0xFFFFF000L
 
+//DF_CS_UMC_AON0_DramLimitAddress0
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT                                                  0x0
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT                                                   0xa
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT                                                0xc
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK                                                    0x000003FFL
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK                                                     0x00000400L
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK                                                  0xFFFFF000L
+
 #endif
commit 36a1707afda9abc704543d6b419a998c64df41ca
Author: Alex Sierra <alex.sierra at amd.com>
Date:   Mon Jan 13 21:27:56 2020 -0600

    drm/amdgpu: modify packet size for pm4 flush tlbs
    
    [Why]
    The PM4 packet size for the flush message was oversized.
    
    [How]
    Adjust the packet size so the ring allocation covers the flush plus
    fence packets.
    
    Signed-off-by: Alex Sierra <alex.sierra at amd.com>
    Reviewed-by: Christian König <christian.koenig at amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
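
The sizing, spelled out as a standalone restatement of the hunks below (the
invalidate-TLBs PM4 packet is 2 dwords and the polling fence needs 8 more,
replacing the old oversized 12-dword estimate):

enum {
	INVALIDATE_TLBS_DWORDS	= 2,
	FENCE_DWORDS		= 8,
	RING_ALLOC_DWORDS	= INVALIDATE_TLBS_DWORDS + FENCE_DWORDS, /* = 10 */
};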

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 874f641de281..a0d4e79c4821 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,7 +368,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
 	.map_queues_size = 7,
 	.unmap_queues_size = 6,
 	.query_status_size = 7,
-	.invalidate_tlbs_size = 12,
+	.invalidate_tlbs_size = 2,
 };
 
 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 46ab46757b25..6c02fe62b497 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -859,7 +859,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
 	.map_queues_size = 7,
 	.unmap_queues_size = 6,
 	.query_status_size = 7,
-	.invalidate_tlbs_size = 12,
+	.invalidate_tlbs_size = 2,
 };
 
 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index bbede09983e1..86f4ffe408e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -418,7 +418,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 
 	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
 		spin_lock(&adev->gfx.kiq.ring_lock);
-		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
+		/* 2 dwords flush + 8 dwords fence */
+		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
 		kiq->pmf->kiq_invalidate_tlbs(ring,
 					pasid, flush_type, all_hub);
 		amdgpu_fence_emit_polling(ring, &seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 40a496804356..54bdc1786ab1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -578,7 +578,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 
 	if (ring->sched.ready) {
 		spin_lock(&adev->gfx.kiq.ring_lock);
-		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
+		/* 2 dwords flush + 8 dwords fence */
+		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
 		kiq->pmf->kiq_invalidate_tlbs(ring,
 					pasid, flush_type, all_hub);
 		amdgpu_fence_emit_polling(ring, &seq);

