[PATCH 014/159] drm/amdgpu: add mmhub support for aldebaran (v3)

Alex Deucher alexander.deucher at amd.com
Wed Feb 24 22:16:34 UTC 2021


From: Le Ma <le.ma at amd.com>

v1: duplicate mmhub_v1_7.c from mmhub_v1_0.c because
the mmhub register addresses for aldebaran differ
from those of existing ASICs (Le)
v2: switch to latest mmhub_v9_4_2 register headers (Hawking)
v3: squash in the VM_L2_CNTL3 default value init for mmhub v1_7

Signed-off-by: Le Ma <le.ma at amd.com>
Signed-off-by: Kevin Wang <kevin1.wang at amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
Acked-by: Evan Quan <evan.quan at amd.com>
Signed-off-by: Hawking Zhang <Hawking.Zhang at amd.com>
Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
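A sketch of how these entry points are expected to be consumed from
gmc_v9_0.c (hypothetical wiring, shown for illustration only; the
actual hookup lands elsewhere in this series):

	/* hypothetical: in gmc_v9_0_set_mmhub_funcs() */
	switch (adev->asic_type) {
	case CHIP_ALDEBARAN:
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	default:
		break;
	}
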
 drivers/gpu/drm/amd/amdgpu/Makefile     |   3 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 598 ++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h |  42 ++
 3 files changed, 642 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 906bf2eaa158..95afdbda0b44 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -83,7 +83,8 @@ amdgpu-y += \
 	gmc_v7_0.o \
 	gmc_v8_0.o \
 	gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
-	gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o
+	gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
+	mmhub_v1_7.o
 
 # add UMC block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
new file mode 100644
index 000000000000..fd9aebb0ca2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "mmhub_v1_7.h"
+
+#include "mmhub/mmhub_1_7_offset.h"
+#include "mmhub/mmhub_1_7_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+
+#define EA_EDC_CNT_MASK 0x3
+#define EA_EDC_CNT_SHIFT 0x2
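+/*
+ * Each MMEA EDC_CNT register packs consecutive 2-bit error counters;
+ * field i decodes as (cnt >> (EA_EDC_CNT_SHIFT * i)) & EA_EDC_CNT_MASK.
+ * This is the layout mmhub_v1_7_query_ras_error_count() below assumes.
+ */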
+
+#define regVM_L2_CNTL3_DEFAULT	0x80100007
+#define regVM_L2_CNTL4_DEFAULT	0x000000c1
+
+u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
+{
+	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
+	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
+
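+	/*
+	 * The BASE/TOP fields hold the framebuffer window in 16 MB
+	 * (1 << 24) units, hence the shifts below; this assumes the
+	 * register semantics match the earlier vega-family parts this
+	 * file was duplicated from.
+	 */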
+	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+	base <<= 24;
+
+	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+	top <<= 24;
+
+	adev->gmc.fb_start = base;
+	adev->gmc.fb_end = top;
+
+	return base;
+}
+
+void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
+{
+	/* two-register stride between regVM_CONTEXT0_* and regVM_CONTEXT1_* */
+	int offset = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+			- regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+
+	WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+			offset * vmid, lower_32_bits(page_table_base));
+
+	WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+			offset * vmid, upper_32_bits(page_table_base));
+}
+
+static void mmhub_v1_7_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	mmhub_v1_7_setup_vm_pt_regs(adev, 0, pt_base);
+
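+	/*
+	 * The GART range is written as 4 KB page frame numbers split
+	 * across two registers: LO32 takes address bits [43:12] and
+	 * HI32 the bits above them (a reading inferred from the >> 12
+	 * and >> 44 shifts below).
+	 */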
+	WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+		     (u32)(adev->gmc.gart_start >> 12));
+	WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+		     (u32)(adev->gmc.gart_start >> 44));
+
+	WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+		     (u32)(adev->gmc.gart_end >> 12));
+	WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+		     (u32)(adev->gmc.gart_end >> 44));
+}
+
+static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+	uint64_t value;
+	uint32_t tmp;
+
+	/* Program the AGP BAR */
+	WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0);
+	WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+	WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+	/* Program the system aperture low logical page number. */
+	WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+	WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+		     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
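+	/*
+	 * LOW/HIGH_ADDR are in 256 KB (1 << 18) units; the aperture is
+	 * stretched so that it covers both the FB and AGP windows.
+	 */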
+
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	/* Set default page address. */
+	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+		adev->vm_manager.vram_base_offset;
+	WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+		     (u32)(value >> 12));
+	WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+		     (u32)(value >> 44));
+
+	/* Program "protection fault". */
+	WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+		     (u32)(adev->dummy_page_addr >> 12));
+	WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+		     (u32)((u64)adev->dummy_page_addr >> 44));
+
+	tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+	WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+}
+
+static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	/* Setup TLB control */
+	tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+			    MTYPE, MTYPE_UC); /* XXX for emulation. */
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+	WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	/* Setup L2 cache */
+	tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+	/* XXX for emulation; refer to closed source code. */
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+			    0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+
+	tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp);
+
+	tmp = regVM_L2_CNTL3_DEFAULT;
+	if (adev->gmc.translate_further) {
+		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+	} else {
+		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+	}
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp);
+
+	tmp = regVM_L2_CNTL4_DEFAULT;
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp);
+}
+
+static void mmhub_v1_7_enable_system_domain(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+	WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp);
+}
+
+static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+		     0xFFFFFFFF);
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+		     0x0000000F);
+
+	WREG32_SOC15(MMHUB, 0,
+		     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
+	WREG32_SOC15(MMHUB, 0,
+		     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
+
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
+		     0);
+	WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
+		     0);
+}
+
+static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev)
+{
+	unsigned num_level, block_size;
+	uint32_t tmp;
+	int i;
+
+	num_level = adev->vm_manager.num_level;
+	block_size = adev->vm_manager.block_size;
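+	/*
+	 * With translate_further the lowest level is handled via PDE0,
+	 * so one page-table level is dropped; otherwise the block size
+	 * field is encoded relative to the default 9-bit (512-entry)
+	 * page table block (a sketch of the encoding, inferred from the
+	 * adjustment below rather than from the register spec).
+	 */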
+	if (adev->gmc.translate_further)
+		num_level -= 1;
+	else
+		block_size -= 9;
+
+	for (i = 0; i <= 14; i++) {
+		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+				    num_level);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+				    1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    PAGE_TABLE_BLOCK_SIZE,
+				    block_size);
+		/* Send no-retry XNACK on fault to suppress VM fault storm. */
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+				    !amdgpu_noretry);
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i, tmp);
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i * 2, 0);
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i * 2, 0);
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i * 2,
+			lower_32_bits(adev->vm_manager.max_pfn - 1));
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i * 2,
+			upper_32_bits(adev->vm_manager.max_pfn - 1));
+	}
+}
+
+static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev)
+{
+	unsigned i;
+
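+	/*
+	 * Open up the address range of each of the 18 invalidation
+	 * engines: all-ones in LO32 plus the low 5 bits of HI32, which
+	 * presumably spans the whole VM address space.
+	 */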
+	for (i = 0; i < 18; ++i) {
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+				    2 * i, 0xffffffff);
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+				    2 * i, 0x1f);
+	}
+}
+
+void mmhub_v1_7_update_power_gating(struct amdgpu_device *adev,
+				bool enable)
+{
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) {
+		if (adev->powerplay.pp_funcs &&
+		    adev->powerplay.pp_funcs->set_powergating_by_smu)
+			amdgpu_dpm_set_powergating_by_smu(adev,
+					AMD_IP_BLOCK_TYPE_GMC, true);
+	}
+}
+
+int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev)) {
+		/*
+		 * MC_VM_FB_LOCATION_BASE/TOP are zero for a VF because they
+		 * are VF copy registers that the vbios post doesn't program;
+		 * under SRIOV the driver has to program them itself.
+		 */
+		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
+			     adev->gmc.vram_start >> 24);
+		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
+			     adev->gmc.vram_end >> 24);
+	}
+
+	/* GART Enable. */
+	mmhub_v1_7_init_gart_aperture_regs(adev);
+	mmhub_v1_7_init_system_aperture_regs(adev);
+	mmhub_v1_7_init_tlb_regs(adev);
+	mmhub_v1_7_init_cache_regs(adev);
+
+	mmhub_v1_7_enable_system_domain(adev);
+	mmhub_v1_7_disable_identity_aperture(adev);
+	mmhub_v1_7_setup_vmid_config(adev);
+	mmhub_v1_7_program_invalidation(adev);
+
+	return 0;
+}
+
+void mmhub_v1_7_gart_disable(struct amdgpu_device *adev)
+{
+	u32 tmp;
+	u32 i;
+
+	/* Disable all tables */
+	for (i = 0; i < 16; i++)
+		WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL, i, 0);
+
+	/* Setup TLB control */
+	tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+	tmp = REG_SET_FIELD(tmp,
+				MC_VM_MX_L1_TLB_CNTL,
+				ENABLE_ADVANCED_DRIVER_MODEL,
+				0);
+	WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+	if (!amdgpu_sriov_vf(adev)) {
+		/* Setup L2 cache */
+		tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+		WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+		WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0);
+	}
+}
+
+/**
+ * mmhub_v1_7_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+{
+	u32 tmp;
+
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp,
+			VM_L2_PROTECTION_FAULT_CNTL,
+			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+			value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	if (!value) {
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				CRASH_ON_NO_RETRY_FAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				CRASH_ON_RETRY_FAULT, 1);
+	}
+
+	WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+void mmhub_v1_7_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+	hub->ctx0_ptb_addr_lo32 =
+		SOC15_REG_OFFSET(MMHUB, 0,
+				 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+	hub->ctx0_ptb_addr_hi32 =
+		SOC15_REG_OFFSET(MMHUB, 0,
+				 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+	hub->vm_inv_eng0_req =
+		SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ);
+	hub->vm_inv_eng0_ack =
+		SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK);
+	hub->vm_context0_cntl =
+		SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL);
+	hub->vm_l2_pro_fault_status =
+		SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS);
+	hub->vm_l2_pro_fault_cntl =
+		SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+}
+
+static void mmhub_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+							bool enable)
+{
+	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;
+
+	def  = data  = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+	def1 = data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
+	def2 = data2 = RREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
+		data |= ATC_L2_MISC_CG__ENABLE_MASK;
+
+		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+		           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+		           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+		           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+		           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+		           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+		           DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+		           DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+		           DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+		           DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+		           DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+	} else {
+		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;
+
+		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+		data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+		          DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+		          DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+		          DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+		          DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+		          DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+	}
+
+	if (def != data)
+		WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data);
+
+	if (def1 != data1)
+		WREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2, data1);
+
+	if (def2 != data2)
+		WREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2, data2);
+}
+
+static void mmhub_v1_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+						       bool enable)
+{
+	uint32_t def, data;
+
+	def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+	else
+		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+	if (def != data)
+		WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data);
+}
+
+int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev,
+			       enum amd_clockgating_state state)
+{
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	switch (adev->asic_type) {
+	case CHIP_ALDEBARAN:
+		mmhub_v1_7_update_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE);
+		mmhub_v1_7_update_medium_grain_light_sleep(adev,
+				state == AMD_CG_STATE_GATE);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+	int data, data1;
+
+	if (amdgpu_sriov_vf(adev))
+		*flags = 0;
+
+	data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+	data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
+
+	/* AMD_CG_SUPPORT_MC_MGCG */
+	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
+		*flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+	/* AMD_CG_SUPPORT_MC_LS */
+	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+		*flags |= AMD_CG_SUPPORT_MC_LS;
+}
+
+static void mmhub_v1_7_query_ras_error_count(struct amdgpu_device *adev,
+					     void *ras_error_status)
+{
+	int i;
+	uint32_t ea0_edc_cnt, ea0_edc_cnt2;
+	uint32_t ea1_edc_cnt, ea1_edc_cnt2;
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	/* EDC CNT will be cleared automatically after read */
+	ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, regMMEA0_EDC_CNT);
+	ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, regMMEA0_EDC_CNT2);
+	ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, regMMEA1_EDC_CNT);
+	ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, regMMEA1_EDC_CNT2);
+
+	/* The error count of each error type is recorded in 2 bits;
+	 * ce and ue fields alternate in EDC_CNT.
+	 */
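+	/* e.g. cnt = 0x9: ce = 0x9 & 3 = 1, then ue = (0x9 >> 2) & 3 = 2 */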
+	for (i = 0; i < 5; i++) {
+		err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+		err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+		ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+		ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+		err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+		err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+		ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+		ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+	}
+	/* successive ue count in EDC_CNT */
+	for (i = 0; i < 5; i++) {
+		err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+		err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+		ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+		ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+	}
+
+	/* ce and ue count in EDC_CNT2 */
+	for (i = 0; i < 3; i++) {
+		err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+		err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+		ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+		ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+		err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+		err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+		ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+		ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+	}
+	/* successive ue count in EDC_CNT2 */
+	for (i = 0; i < 6; i++) {
+		err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+		err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+		ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+		ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+	}
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
+	.ras_late_init = amdgpu_mmhub_ras_late_init,
+	.query_ras_error_count = mmhub_v1_7_query_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
new file mode 100644
index 000000000000..dc2bb6cfbe73
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __MMHUB_V1_7_H__
+#define __MMHUB_V1_7_H__
+
+extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
+
+u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev);
+int mmhub_v1_7_gart_enable(struct amdgpu_device *adev);
+void mmhub_v1_7_gart_disable(struct amdgpu_device *adev);
+void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev,
+					 bool value);
+void mmhub_v1_7_init(struct amdgpu_device *adev);
+int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev,
+			       enum amd_clockgating_state state);
+void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v1_7_update_power_gating(struct amdgpu_device *adev,
+				    bool enable);
+void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base);
+
+#endif
-- 
2.29.2


