[PATCH] drm/amdgpu: update df_v3_6 for xgmi perfmons
From: Jonathan Kim <Jonathan.Kim@amd.com>
Date: Tue Jun 18 19:24:33 UTC 2019
v3: fixed cleanup by adding a fini hook to free the adev df perfmon counter
config array.
v2: simplified by removing xgmi references in function names and moving to
generic df function names; fixed an issue by removing the hardcoded cake tx
data events; streamlined error handling by having df_v3_6_pmc_get_ctrl_settings
return an error code.

Add pmu attribute groups and structures for perf events.
Add a sysfs entry to report the number of available df perfmon counters.
Fix overflow handling in perfmon counter reads.
Change-Id: I61f731c0066b17834656c746e7efe038c4f62acf
Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
---
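Note for reviewers (not part of the commit message): the AMDGPU_PMU_ATTR()
helper added to amdgpu.h just generates a read-only sysfs attribute that
prints a fixed string, so e.g. AMDGPU_PMU_ATTR(event, "config:0-7") expands
to roughly the following.

static ssize_t
event_show(struct device *dev, struct device_attribute *attr, char *page)
{
	/* the string must fit in one sysfs page */
	BUILD_BUG_ON(sizeof("config:0-7") >= PAGE_SIZE - 1);
	return sprintf(page, "config:0-7\n");
}

static struct device_attribute pmu_attr_event = __ATTR_RO(event);
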
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 17 ++
drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 409 +++++++++++++--------------
drivers/gpu/drm/amd/amdgpu/df_v3_6.h | 19 +-
drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +
4 files changed, 217 insertions(+), 231 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d355e9a09ad1..8605691de416 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -682,6 +682,7 @@ struct amdgpu_nbio_funcs {
struct amdgpu_df_funcs {
void (*init)(struct amdgpu_device *adev);
+ void (*fini)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -962,6 +963,7 @@ struct amdgpu_device {
long compute_timeout;
uint64_t unique_id;
+ uint64_t *df_perfmon_config_assign_mask;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1201,4 +1203,19 @@ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return
#endif
#include "amdgpu_object.h"
+
+/* used by df_v3_6.c and amdgpu_pmu.c */
+#define AMDGPU_PMU_ATTR(_name, _object) \
+static ssize_t \
+_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1); \
+ return sprintf(page, _object "\n"); \
+} \
+ \
+static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
+
#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 8c09bf994acd..5b10c0f9d29d 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -30,8 +30,113 @@
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
16, 32, 0, 0, 0, 2, 4, 8};
+/* init df format attrs */
+AMDGPU_PMU_ATTR(event, "config:0-7");
+AMDGPU_PMU_ATTR(instance, "config:8-15");
+AMDGPU_PMU_ATTR(umask, "config:16-23");
+
+/* df format attributes */
+static struct attribute *df_v3_6_format_attrs[] = {
+ &pmu_attr_event.attr,
+ &pmu_attr_instance.attr,
+ &pmu_attr_umask.attr,
+ NULL
+};
+
+/* df format attribute group */
+static struct attribute_group df_v3_6_format_attr_group = {
+ .name = "format",
+ .attrs = df_v3_6_format_attrs,
+};
+
+/* df event attrs */
+AMDGPU_PMU_ATTR(cake0_pcsout_txdata,
+ "event=0x7,instance=0x46,umask=0x2");
+AMDGPU_PMU_ATTR(cake1_pcsout_txdata,
+ "event=0x7,instance=0x47,umask=0x2");
+AMDGPU_PMU_ATTR(cake0_pcsout_txmeta,
+ "event=0x7,instance=0x46,umask=0x4");
+AMDGPU_PMU_ATTR(cake1_pcsout_txmeta,
+ "event=0x7,instance=0x47,umask=0x4");
+AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc,
+ "event=0xb,instance=0x46,umask=0x4");
+AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc,
+ "event=0xb,instance=0x47,umask=0x4");
+AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc,
+ "event=0xb,instance=0x46,umask=0x8");
+AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc,
+ "event=0xb,instance=0x47,umask=0x8");
+
+/* df event attributes */
+static struct attribute *df_v3_6_event_attrs[] = {
+ &pmu_attr_cake0_pcsout_txdata.attr,
+ &pmu_attr_cake1_pcsout_txdata.attr,
+ &pmu_attr_cake0_pcsout_txmeta.attr,
+ &pmu_attr_cake1_pcsout_txmeta.attr,
+ &pmu_attr_cake0_ftiinstat_reqalloc.attr,
+ &pmu_attr_cake1_ftiinstat_reqalloc.attr,
+ &pmu_attr_cake0_ftiinstat_rspalloc.attr,
+ &pmu_attr_cake1_ftiinstat_rspalloc.attr,
+ NULL
+};
+
+/* df event attribute group */
+static struct attribute_group df_v3_6_event_attr_group = {
+ .name = "events",
+ .attrs = df_v3_6_event_attrs
+};
+
+/* df event attr groups */
+const struct attribute_group *df_v3_6_attr_groups[] = {
+ &df_v3_6_format_attr_group,
+ &df_v3_6_event_attr_group,
+ NULL
+};
+
+/* get the number of df counters available */
+static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev;
+ struct drm_device *ddev;
+ int i, count;
+
+ ddev = dev_get_drvdata(dev);
+ adev = ddev->dev_private;
+ count = 0;
+
+ for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
+ if (adev->df_perfmon_config_assign_mask[i] == 0)
+ count++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", count);
+}
+
+/* device attr for available perfmon counters */
+static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
+
+/* init perfmons */
static void df_v3_6_init(struct amdgpu_device *adev)
{
+ int i, ret;
+
+ ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
+ if (ret)
+ DRM_ERROR("failed to create file for available df counters\n");
+
+ adev->df_perfmon_config_assign_mask =
+ kzalloc(sizeof(uint64_t) * DF_V3_6_MAX_COUNTERS, GFP_KERNEL);
+
+ for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++)
+ adev->df_perfmon_config_assign_mask[i] = 0;
+
+}
+
+static void df_v3_6_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->df_perfmon_config_assign_mask);
}
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
@@ -105,24 +210,17 @@ static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
-/* hold counter assignment per gpu struct */
-struct df_v3_6_event_mask {
- struct amdgpu_device gpu;
- uint64_t config_assign_mask[AMDGPU_DF_MAX_COUNTERS];
-};
-
/* get assigned df perfmon ctr as int */
-static void df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
+static void df_v3_6_pmc_config_2_cntr(uint64_t *config_assign_mask,
uint64_t config,
int *counter)
{
- struct df_v3_6_event_mask *mask;
int i;
- mask = container_of(adev, struct df_v3_6_event_mask, gpu);
+ *counter = -1;
- for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
- if ((config & 0x0FFFFFFUL) == mask->config_assign_mask[i]) {
+ for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
+ if ((config & 0x0FFFFFFUL) == config_assign_mask[i]) {
*counter = i;
return;
}
@@ -136,10 +234,10 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr)
{
+ int target_cntr;
- int target_cntr = -1;
-
- df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
+ df_v3_6_pmc_config_2_cntr(adev->df_perfmon_config_assign_mask, config,
+ &target_cntr);
if (target_cntr < 0)
return;
@@ -177,31 +275,30 @@ static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
}
/* get control counter settings i.e. address and values to set */
-static void df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
+static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
uint64_t config,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr,
uint32_t *lo_val,
uint32_t *hi_val)
{
-
uint32_t eventsel, instance, unitmask;
uint32_t es_5_0, es_13_0, es_13_6, es_13_12, es_11_8, es_7_0;
df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);
if (lo_val == NULL || hi_val == NULL)
- return;
+ return -EINVAL;
if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
DRM_ERROR("DF PMC addressing not retrieved! Lo: %x, Hi: %x",
*lo_base_addr, *hi_base_addr);
- return;
+ return -ENXIO;
}
- eventsel = GET_EVENT(config);
- instance = GET_INSTANCE(config);
- unitmask = GET_UNITMASK(config);
+ eventsel = DF_V3_6_GET_EVENT(config);
+ instance = DF_V3_6_GET_INSTANCE(config);
+ unitmask = DF_V3_6_GET_UNITMASK(config);
es_5_0 = eventsel & 0x3FUL;
es_13_6 = instance;
@@ -211,6 +308,8 @@ static void df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
es_7_0 = es_13_0 & 0x0FFUL;
*lo_val = (es_7_0 & 0xFFUL) | ((unitmask & 0x0FUL) << 8);
*hi_val = (es_11_8 | ((es_13_12)<<(29)));
+
+ return 0;
}
/* assign df performance counters for read */
@@ -218,26 +317,23 @@ static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev,
uint64_t config,
int *is_assigned)
{
-
- struct df_v3_6_event_mask *mask;
int i, target_cntr;
- target_cntr = -1;
-
*is_assigned = 0;
- df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
+ df_v3_6_pmc_config_2_cntr(adev->df_perfmon_config_assign_mask, config,
+ &target_cntr);
if (target_cntr >= 0) {
*is_assigned = 1;
return 0;
}
- mask = container_of(adev, struct df_v3_6_event_mask, gpu);
+ for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
+ if (adev->df_perfmon_config_assign_mask[i] == 0ULL) {
+ adev->df_perfmon_config_assign_mask[i] =
+ config & 0x0FFFFFFUL;
- for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
- if (mask->config_assign_mask[i] == 0ULL) {
- mask->config_assign_mask[i] = config & 0x0FFFFFFUL;
return 0;
}
}
@@ -249,66 +345,20 @@ static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev,
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config)
{
-
- struct df_v3_6_event_mask *mask;
int target_cntr;
- target_cntr = -1;
-
- df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
-
- mask = container_of(adev, struct df_v3_6_event_mask, gpu);
+ df_v3_6_pmc_config_2_cntr(adev->df_perfmon_config_assign_mask, config,
+ &target_cntr);
if (target_cntr >= 0)
- mask->config_assign_mask[target_cntr] = 0ULL;
-
+ adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL;
}
-/*
- * get xgmi link counters via programmable data fabric (df) counters (max 4)
- * using cake tx event.
- *
- * @adev -> amdgpu device
- * @instance-> currently cake has 2 links to poll on vega20
- * @count -> counters to pass
- *
- */
-static void df_v3_6_get_xgmi_link_cntr(struct amdgpu_device *adev,
- int instance,
- uint64_t *count)
-{
- uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
- uint64_t config;
-
- config = GET_INSTANCE_CONFIG(instance);
-
- df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
- &hi_base_addr);
-
- if ((lo_base_addr == 0) || (hi_base_addr == 0))
- return;
-
- lo_val = RREG32_PCIE(lo_base_addr);
- hi_val = RREG32_PCIE(hi_base_addr);
-
- *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
-}
-
-/*
- * reset xgmi link counters
- *
- * @adev -> amdgpu device
- * @instance-> currently cake has 2 links to poll on vega20
- *
- */
-static void df_v3_6_reset_xgmi_link_cntr(struct amdgpu_device *adev,
- int instance)
+static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
+ uint64_t config)
{
uint32_t lo_base_addr, hi_base_addr;
- uint64_t config;
-
- config = 0ULL | (0x7ULL) | ((0x46ULL + instance) << 8) | (0x2 << 16);
df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
&hi_base_addr);
@@ -320,185 +370,100 @@ static void df_v3_6_reset_xgmi_link_cntr(struct amdgpu_device *adev,
WREG32_PCIE(hi_base_addr, 0UL);
}
-/*
- * add xgmi link counters
- *
- * @adev -> amdgpu device
- * @instance-> currently cake has 2 links to poll on vega20
- *
- */
-static int df_v3_6_add_xgmi_link_cntr(struct amdgpu_device *adev,
- int instance)
+static int df_v3_6_add_perfmon_cntr(struct amdgpu_device *adev,
+ uint64_t config)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
- uint64_t config;
int ret, is_assigned;
- if (instance < 0 || instance > 1)
- return -EINVAL;
-
- config = GET_INSTANCE_CONFIG(instance);
-
ret = df_v3_6_pmc_assign_cntr(adev, config, &is_assigned);
if (ret || is_assigned)
return ret;
- df_v3_6_pmc_get_ctrl_settings(adev,
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
&hi_val);
+ if (ret)
+ return ret;
+
WREG32_PCIE(lo_base_addr, lo_val);
WREG32_PCIE(hi_base_addr, hi_val);
return ret;
}
-
-/*
- * start xgmi link counters
- *
- * @adev -> amdgpu device
- * @instance-> currently cake has 2 links to poll on vega20
- * @is_enable -> either resume or assign event via df perfmon
- *
- */
-
-static int df_v3_6_start_xgmi_link_cntr(struct amdgpu_device *adev,
- int instance,
- int is_enable)
+static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
+ int is_enable)
{
uint32_t lo_base_addr, hi_base_addr, lo_val;
- uint64_t config;
- int ret;
+ int ret = 0;
- if (instance < 0 || instance > 1)
- return -EINVAL;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
- if (is_enable) {
+ df_v3_6_reset_perfmon_cntr(adev, config);
- ret = df_v3_6_add_xgmi_link_cntr(adev, instance);
+ if (is_enable) {
- if (ret)
- return ret;
+ ret = df_v3_6_add_perfmon_cntr(adev, config);
- } else {
-
- config = GET_INSTANCE_CONFIG(instance);
+ } else {
- df_v3_6_pmc_get_ctrl_settings(adev,
- config,
- &lo_base_addr,
- &hi_base_addr,
- NULL,
- NULL);
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
+ config,
+ &lo_base_addr,
+ &hi_base_addr,
+ NULL,
+ NULL);
- if (lo_base_addr == 0)
- return -EINVAL;
+ if (ret)
+ return ret;
- lo_val = RREG32_PCIE(lo_base_addr);
+ lo_val = RREG32_PCIE(lo_base_addr);
- WREG32_PCIE(lo_base_addr, lo_val | (1ULL << 22));
+ WREG32_PCIE(lo_base_addr, lo_val | (1ULL << 22));
+ }
- ret = 0;
+ break;
+ default:
+ break;
}
return ret;
-
}
-/*
- * start xgmi link counters
- *
- * @adev -> amdgpu device
- * @instance-> currently cake has 2 links to poll on vega20
- * @is_enable -> either pause or unassign event via df perfmon
- *
- */
-
-static int df_v3_6_stop_xgmi_link_cntr(struct amdgpu_device *adev,
- int instance,
- int is_disable)
+static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
+ int is_disable)
{
-
uint32_t lo_base_addr, hi_base_addr, lo_val;
- uint64_t config;
-
- config = GET_INSTANCE_CONFIG(instance);
-
- if (is_disable) {
- df_v3_6_reset_xgmi_link_cntr(adev, instance);
- df_v3_6_pmc_release_cntr(adev, config);
- } else {
-
- df_v3_6_pmc_get_ctrl_settings(adev,
- config,
- &lo_base_addr,
- &hi_base_addr,
- NULL,
- NULL);
-
- if ((lo_base_addr == 0) || (hi_base_addr == 0))
- return -EINVAL;
-
- lo_val = RREG32_PCIE(lo_base_addr);
-
- WREG32_PCIE(lo_base_addr, lo_val & ~(1ULL << 22));
- }
-
- return 0;
-}
-
-static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
- int is_enable)
-{
- int xgmi_tx_link, ret = 0;
+ int ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
- xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
- : (IS_DF_XGMI_1_TX(config) ? 1 : -1);
-
- if (xgmi_tx_link >= 0)
- ret = df_v3_6_start_xgmi_link_cntr(adev, xgmi_tx_link,
- is_enable);
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
+ config,
+ &lo_base_addr,
+ &hi_base_addr,
+ NULL,
+ NULL);
if (ret)
return ret;
- ret = 0;
- break;
- default:
- break;
- }
+ lo_val = RREG32_PCIE(lo_base_addr);
- return ret;
-}
+ WREG32_PCIE(lo_base_addr, lo_val & ~(1ULL << 22));
-static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
- int is_disable)
-{
- int xgmi_tx_link, ret = 0;
+ if (is_disable)
+ df_v3_6_pmc_release_cntr(adev, config);
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
- : (IS_DF_XGMI_1_TX(config) ? 1 : -1);
-
- if (xgmi_tx_link >= 0) {
- ret = df_v3_6_stop_xgmi_link_cntr(adev,
- xgmi_tx_link,
- is_disable);
- if (ret)
- return ret;
- }
-
- ret = 0;
- break;
+ break;
default:
break;
}
@@ -510,28 +475,36 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
-
- int xgmi_tx_link;
+ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ *count = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
- xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
- : (IS_DF_XGMI_1_TX(config) ? 1 : -1);
- if (xgmi_tx_link >= 0) {
- df_v3_6_reset_xgmi_link_cntr(adev, xgmi_tx_link);
- df_v3_6_get_xgmi_link_cntr(adev, xgmi_tx_link, count);
- }
+ df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
+ &hi_base_addr);
+
+ if ((lo_base_addr == 0) || (hi_base_addr == 0))
+ return;
+
+ lo_val = RREG32_PCIE(lo_base_addr);
+ hi_val = RREG32_PCIE(hi_base_addr);
+
+ *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
+
+ if (*count >= DF_V3_6_PERFMON_OVERFLOW)
+ *count = 0;
break;
+
default:
break;
}
-
}
const struct amdgpu_df_funcs df_v3_6_funcs = {
.init = df_v3_6_init,
+ .fini = df_v3_6_fini,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
index fcffd807764d..76998541bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
@@ -36,22 +36,15 @@ enum DF_V3_6_MGCG {
};
/* Defined in global_features.h as FTI_PERFMON_VISIBLE */
-#define AMDGPU_DF_MAX_COUNTERS 4
+#define DF_V3_6_MAX_COUNTERS 4
/* get flags from df perfmon config */
-#define GET_EVENT(x) (x & 0xFFUL)
-#define GET_INSTANCE(x) ((x >> 8) & 0xFFUL)
-#define GET_UNITMASK(x) ((x >> 16) & 0xFFUL)
-#define GET_INSTANCE_CONFIG(x) (0ULL | (0x07ULL) \
- | ((0x046ULL + x) << 8) \
- | (0x02 << 16))
-
-/* df event conf macros */
-#define IS_DF_XGMI_0_TX(x) (GET_EVENT(x) == 0x7 \
- && GET_INSTANCE(x) == 0x46 && GET_UNITMASK(x) == 0x2)
-#define IS_DF_XGMI_1_TX(x) (GET_EVENT(x) == 0x7 \
- && GET_INSTANCE(x) == 0x47 && GET_UNITMASK(x) == 0x2)
+#define DF_V3_6_GET_EVENT(x) (x & 0xFFUL)
+#define DF_V3_6_GET_INSTANCE(x) ((x >> 8) & 0xFFUL)
+#define DF_V3_6_GET_UNITMASK(x) ((x >> 16) & 0xFFUL)
+#define DF_V3_6_PERFMON_OVERFLOW 0xFFFFFFFFFFFFULL
+extern const struct attribute_group *df_v3_6_attr_groups[];
extern const struct amdgpu_df_funcs df_v3_6_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 78bd4fc07bab..4e746f97cc8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1079,6 +1079,7 @@ static int soc15_common_hw_init(void *handle)
*/
if (adev->nbio_funcs->remap_hdp_registers)
adev->nbio_funcs->remap_hdp_registers(adev);
+ adev->df_funcs->init(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
/* HW doorbell routing policy: doorbell writing not
@@ -1100,6 +1101,8 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ adev->df_funcs->fini(adev);
+
return 0;
}
--
2.17.1
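
Editor's note, outside the patch: a rough, hypothetical sketch of how the
df_v3_6_attr_groups table exported above could be consumed on the perf side,
e.g. by the amdgpu_pmu.c referenced in the AMDGPU_PMU_ATTR comment. The
df_pmu_* names, the df_pmu_to_adev() helper and the df_funcs->pmc_get_count
hook used here are assumptions for illustration only, not code from this
series.

#include <linux/perf_event.h>

/* hypothetical read callback: fetch the current df counter value */
static void df_pmu_event_read(struct perf_event *event)
{
	struct amdgpu_device *adev = df_pmu_to_adev(event->pmu); /* assumed helper */
	u64 count = 0;

	/* assumed df_funcs hook wrapping df_v3_6_pmc_get_count() */
	adev->df_funcs->pmc_get_count(adev, event->attr.config, &count);
	local64_set(&event->count, count);
}

static struct pmu df_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* system-wide, uncore-style PMU */
	.attr_groups	= df_v3_6_attr_groups,	/* "format" and "events" dirs in sysfs */
	.read		= df_pmu_event_read,
	/* .event_init/.add/.del/.start/.stop omitted in this sketch */
};

/* somewhere in device init: expose the PMU to perf as "amdgpu_df" */
ret = perf_pmu_register(&df_pmu, "amdgpu_df", -1);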