[PATCH 060/117] drm/amdgpu: apply nbio7 for Raven (v3)

Alex Deucher alexdeucher at gmail.com
Wed May 10 18:46:51 UTC 2017


From: Chunming Zhou <David1.Zhou at amd.com>

The nbio block handles misc bus io operations. Handle the differences
between the nbio versions used by different ASICs: Vega10 uses nbio
v6.1, while Raven (an APU) uses nbio v7.0.
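
Every converted call site follows the same pattern: check
adev->flags & AMD_IS_APU at runtime and route the access through the
nbio v7.0 helpers on APUs (Raven), keeping the nbio v6.1 helpers for
dGPUs (Vega10). Below is a minimal standalone sketch of that dispatch,
with the driver structures and helpers stubbed out (the flag value and
stub bodies are assumptions for illustration, not driver code):

/*
 * Illustrative sketch only: struct amdgpu_device, AMD_IS_APU and the
 * nbio_v*_hdp_flush() helpers are stubbed so the dispatch pattern can
 * be shown in isolation; the real definitions live in the amdgpu
 * driver (amdgpu.h, nbio_v6_1.c, nbio_v7_0.c).
 */
#include <stdio.h>

#define AMD_IS_APU (1UL << 0)	/* assumed bit value for this sketch */

struct amdgpu_device {
	unsigned long flags;	/* AMD_IS_APU is set on APUs like Raven */
};

static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
	printf("nbio v6.1 HDP flush (dGPU path, e.g. Vega10)\n");
}

static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
{
	printf("nbio v7.0 HDP flush (APU path, e.g. Raven)\n");
}

/* The check this patch adds at each nbio call site: dispatch on the
 * APU flag rather than on a specific asic_type. */
static void hdp_flush(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);
}

int main(void)
{
	struct amdgpu_device raven = { .flags = AMD_IS_APU };
	struct amdgpu_device vega10 = { .flags = 0 };

	hdp_flush(&raven);	/* takes the nbio v7.0 path */
	hdp_flush(&vega10);	/* takes the nbio v6.1 path */
	return 0;
}

Checking the APU flag rather than CHIP_RAVEN (the v2 change noted
below) keeps the call sites generic for APUs instead of hard-coding a
single chip.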

v2: switch checks from RAVEN to APU (Alex)
    squash in raven rev id fetch
    squash in fix for uninitialized hdp flush reg index for raven
v3: add some missed RAVEN to APU checks (Alex)

Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  | 14 ++++++++++---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  9 ++++++--
 drivers/gpu/drm/amd/amdgpu/soc15.c     | 38 ++++++++++++++++++++++++----------
 drivers/gpu/drm/amd/amdgpu/soc15.h     |  1 +
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 10 +++++++--
 5 files changed, 54 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 7a875ff..627d442 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -33,6 +33,7 @@
 #include "soc15_common.h"
 
 #include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
@@ -215,7 +216,10 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 	unsigned i, j;
 
 	/* flush hdp cache */
-	nbio_v6_1_hdp_flush(adev);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_hdp_flush(adev);
+	else
+		nbio_v6_1_hdp_flush(adev);
 
 	spin_lock(&adev->mc.invalidate_lock);
 
@@ -479,7 +483,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
 	/* size in MB on si */
 	adev->mc.mc_vram_size =
-		nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
+		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
+		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
 	adev->mc.real_vram_size = adev->mc.mc_vram_size;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
 
@@ -718,7 +723,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 		return r;
 
 	/* After HDP is initialized, flush HDP.*/
-	nbio_v6_1_hdp_flush(adev);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_hdp_flush(adev);
+	else
+		nbio_v6_1_hdp_flush(adev);
 
 	r = gfxhub_v1_0_gart_enable(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e4825a3..76cb766 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -386,7 +386,9 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask = 0;
 	struct nbio_hdp_flush_reg *nbio_hf_reg;
 
-	if (ring->adev->asic_type == CHIP_VEGA10)
+	if (ring->adev->flags & AMD_IS_APU)
+		nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+	else
 		nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
 
 	if (ring == &ring->adev->sdma.instance[0].ring)
@@ -617,7 +619,10 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
 		}
 		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
 		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
-		nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+		if (adev->flags & AMD_IS_APU)
+			nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+		else
+			nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
 
 		if (amdgpu_sriov_vf(adev))
 			sdma_v4_0_ring_set_wptr(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 26d415c..853b0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -104,10 +104,10 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 	u32 r;
 	struct nbio_pcie_index_data *nbio_pcie_id;
 
-	if (adev->asic_type == CHIP_VEGA10)
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+	if (adev->flags & AMD_IS_APU)
+		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 	else
-		BUG();
+		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 
 	address = nbio_pcie_id->index_offset;
 	data = nbio_pcie_id->data_offset;
@@ -125,10 +125,10 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags, address, data;
 	struct nbio_pcie_index_data *nbio_pcie_id;
 
-	if (adev->asic_type == CHIP_VEGA10)
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+	if (adev->flags & AMD_IS_APU)
+		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 	else
-		BUG();
+		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 
 	address = nbio_pcie_id->index_offset;
 	data = nbio_pcie_id->data_offset;
@@ -199,7 +199,10 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-	return nbio_v6_1_get_memsize(adev);
+	if (adev->flags & AMD_IS_APU)
+		return nbio_v7_0_get_memsize(adev);
+	else
+		return nbio_v6_1_get_memsize(adev);
 }
 
 static const u32 vega10_golden_init[] =
@@ -376,7 +379,10 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (nbio_v6_1_get_memsize(adev) != 0xffffffff)
+		u32 memsize = (adev->flags & AMD_IS_APU) ?
+			nbio_v7_0_get_memsize(adev) :
+			nbio_v6_1_get_memsize(adev);
+		if (memsize != 0xffffffff)
 			break;
 		udelay(1);
 	}
@@ -450,8 +456,12 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
 					bool enable)
 {
-	nbio_v6_1_enable_doorbell_aperture(adev, enable);
-	nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
+	if (adev->flags & AMD_IS_APU) {
+		nbio_v7_0_enable_doorbell_aperture(adev, enable);
+	} else {
+		nbio_v6_1_enable_doorbell_aperture(adev, enable);
+		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
+	}
 }
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -518,7 +528,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-	return nbio_v6_1_get_rev_id(adev);
+	if (adev->flags & AMD_IS_APU)
+		return nbio_v7_0_get_rev_id(adev);
+	else
+		return nbio_v6_1_get_rev_id(adev);
 }
 
 
@@ -569,6 +582,9 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_VEGA10:
 		nbio_v6_1_init(adev);
 		break;
+	case CHIP_RAVEN:
+		nbio_v7_0_init(adev);
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 378a46d..acb3cdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -25,6 +25,7 @@
 #define __SOC15_H__
 
 #include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
 
 extern const struct amd_ip_funcs soc15_common_ip_funcs;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 071f56e..67610f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -97,7 +97,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	/* disable irqs */
 	vega10_ih_disable_interrupts(adev);
 
-	nbio_v6_1_ih_control(adev);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_ih_control(adev);
+	else
+		nbio_v6_1_ih_control(adev);
 
 	ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -148,7 +151,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 						 ENABLE, 0);
 	}
 	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
-	nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+	else
+		nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
 
 	tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
 	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
-- 
2.5.5


