[PATCH] drm/amdgpu: xgmi counters via perfevent
Kim, Jonathan
Jonathan.Kim at amd.com
Wed Apr 17 18:27:45 UTC 2019
get xgmi counters through perf_event.h.
added files:
amdgpu_pmu.c/h - handles perf event logic (e.g. start/stop/add/delete)
amdgpu_df_pmc.c/h - xgmi counter setup logic via DF PerfmonCtr
modified files:
soc15.c - added xgmi specific r/w functions to setup/read DF perfmonCtr
amdgpu_device.c - added init call for amdgpu_pmu
amdgpu.h - added macros for xgmi perf functions in soc15.c
Makefile - added call to build amdgpu_pmu and amdgpu_df_pmc object files
validated on Vega20 D16302 2P system.
Change-Id: I5ce94953b0a7919684c753d3006f0d44963b6bb5
Signed-off-by: Jonathan Kim <jonathan.kim at amd.com>
---
drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 17 +
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.c | 170 +++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.h | 65 ++++
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 383 +++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h | 37 ++
drivers/gpu/drm/amd/amdgpu/soc15.c | 159 ++++++++-
8 files changed, 829 insertions(+), 9 deletions(-)
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.h
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7d539ba6400d..29876219d9d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o
+ amdgpu_vm_sdma.o amdgpu_df_pmc.o amdgpu_pmu.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bc96ec45cd96..c63075d3a5a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -558,6 +558,19 @@ struct amdgpu_asic_funcs {
uint64_t *count1);
/* do we need to reset the asic at init time (e.g., kexec) */
bool (*need_reset_on_init)(struct amdgpu_device *adev);
+
+ /* xGMI link utilization */
+ void (*get_xgmi_link_cntr)(struct amdgpu_device *adev,
+ int instance,
+ uint64_t *count);
+ int (*start_xgmi_link_cntr)(struct amdgpu_device *adev,
+ int instance,
+ int is_enable);
+ int (*stop_xgmi_link_cntr)(struct amdgpu_device *adev,
+ int instance,
+ int is_disable);
+ void (*reset_xgmi_link_cntr)(struct amdgpu_device *adev,
+ int instance);
};
/*
@@ -1068,6 +1081,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
+#define amdgpu_asic_get_xgmi_link_cntr(adev, instance, count) ((adev)->asic_funcs->get_xgmi_link_cntr((adev), (instance), (count)))
+#define amdgpu_asic_start_xgmi_link_cntr(adev, instance, is_enable) ((adev)->asic_funcs->start_xgmi_link_cntr((adev), (instance), (is_enable)))
+#define amdgpu_asic_stop_xgmi_link_cntr(adev, instance, is_disable) ((adev)->asic_funcs->stop_xgmi_link_cntr((adev), (instance), (is_disable)))
+#define amdgpu_asic_reset_xgmi_link_cntr(adev, instance) ((adev)->asic_funcs->reset_xgmi_link_cntr((adev), (instance)))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d46675b89886..5470f8e908db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -61,6 +61,7 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
+#include "amdgpu_pmu.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -2711,6 +2712,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
+ r = amdgpu_pmu_init(adev);
+ if (r)
+ dev_err(adev->dev, "amdgpu_pmu_init failed\n");
+
/* must succeed. */
amdgpu_ras_post_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.c
new file mode 100644
index 000000000000..f10c6c03f876
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jonathan Kim <jonathan.kim at amd.com>
+ *
+ */
+
+#include <linux/perf_event.h>
+#include <linux/init.h>
+#include "amdgpu.h"
+#include "amdgpu_df_pmc.h"
+
+
+/* hold counter assignment per gpu struct
+ *
+ * NOTE(review): df_pmc_config_2_cntr() below recovers an event_mask via
+ * container_of(adev, struct event_mask, gpu), which is only valid if the
+ * amdgpu_device is actually embedded in an event_mask.  No event_mask is
+ * allocated anywhere in this patch -- verify the allocation site.
+ */
+struct event_mask {
+	struct amdgpu_device gpu;
+	uint64_t config_assign_mask[AMDGPU_DF_MAX_COUNTERS];
+};
+
+/* get assigned df perfmon ctr as int
+ * Scans the per-gpu assignment table for the low 24 bits of @config and
+ * writes the matching counter index to *counter (left untouched if the
+ * config is not assigned, so callers must pre-set it to -1).
+ */
+static void df_pmc_config_2_cntr(struct amdgpu_device *adev,
+				     uint64_t config,
+				     int *counter)
+{
+	struct event_mask *mask;
+	int i;
+
+	mask = container_of(adev, struct event_mask, gpu);
+
+	for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
+		if ((config & 0x0FFFFFFUL) == mask->config_assign_mask[i]) {
+			*counter = i;
+			return;
+		}
+	}
+}
+
+/* get address based on counter assignment
+ * Resolves the lo/hi MMIO addresses of the perfmon counter assigned to
+ * @config.  @is_ctrl selects the control register bank over the read bank.
+ * Outputs are zeroed first so callers can detect "no counter assigned"
+ * (the original left them uninitialized on the early return below).
+ */
+static void df_pmc_get_addr(struct amdgpu_device *adev,
+			  uint64_t config,
+			  int is_ctrl,
+			  uint32_t *lo_base_addr,
+			  uint32_t *hi_base_addr)
+{
+	int target_cntr = -1;
+
+	*lo_base_addr = 0;
+	*hi_base_addr = 0;
+
+	df_pmc_config_2_cntr(adev, config, &target_cntr);
+
+	if (target_cntr < 0)
+		return;
+
+	*lo_base_addr = is_ctrl ? VEGA20_DF_PERFMONCTR_CTRL_BASE
+			: VEGA20_DF_PERFMONCTR_READ_BASE;
+	/* counters sit at a fixed stride: index directly, no loop needed */
+	*lo_base_addr += target_cntr * VEGA20_DF_PERFMONCTR_BASE_OFFSET;
+	*hi_base_addr = *lo_base_addr + VEGA20_DF_PERFMONCTR_HI_OFFSET;
+
+	/* config is 64-bit: %x on a uint64_t is a format-specifier mismatch
+	 * (undefined behavior); use %llx and demote to pr_debug */
+	pr_debug("DF PMC ADDR config = %llx, lo = %x, hi = %x\n",
+		 (unsigned long long)config, *lo_base_addr, *hi_base_addr);
+}
+
+/* get read counter address
+ * Thin wrapper over df_pmc_get_addr() with is_ctrl = 0: resolves the
+ * lo/hi read-register addresses for the counter assigned to @config.
+ */
+void df_pmc_get_read_settings(struct amdgpu_device *adev,
+			uint64_t config,
+			uint32_t *lo_base_addr,
+			uint32_t *hi_base_addr)
+{
+	df_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
+}
+
+/* get control counter settings i.e. address and values to set
+ * Resolves the control-register addresses for @config and, when @lo_val /
+ * @hi_val are non-NULL, computes the control values encoding event select,
+ * unit mask and the enable bit (bit 22 of the low word).
+ */
+void df_pmc_get_ctrl_settings(struct amdgpu_device *adev,
+			  uint64_t config,
+			  uint32_t *lo_base_addr,
+			  uint32_t *hi_base_addr,
+			  uint32_t *lo_val,
+			  uint32_t *hi_val)
+{
+	uint32_t eventsel, instance, unitmask, enable;
+	uint32_t es_5_0, es_13_0, es_13_6, es_13_12, es_11_8, es_7_0;
+
+	df_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);
+
+	/* BUGFIX: the original had no braces here, so "return" executed
+	 * unconditionally and everything below was dead code.  Also use
+	 * logical || rather than bitwise | for the validity test. */
+	if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
+		DRM_ERROR("DF PMC addressing not retrieved! Lo: %x, Hi: %x",
+				*lo_base_addr, *hi_base_addr);
+		return;
+	}
+
+	/* callers (e.g. the stop path) pass NULL when they only need the
+	 * addresses; don't dereference the value outputs in that case */
+	if (!lo_val || !hi_val)
+		return;
+
+	eventsel = GET_EVENT(config);
+	instance = GET_INSTANCE(config);
+	unitmask = GET_UNITMASK(config);
+	enable = GET_ENABLE(config);
+
+	/* repack event select + instance into the split hw bit fields */
+	es_5_0 = eventsel & 0x3FUL;
+	es_13_6 = instance;
+	es_13_0 = (es_13_6 << 6) + es_5_0;
+	es_13_12 = (es_13_0 & 0x03000UL) >> 12;
+	es_11_8 = (es_13_0 & 0x0F00UL) >> 8;
+	es_7_0 = es_13_0 & 0x0FFUL;
+	*lo_val = (es_7_0 & 0xFFUL) | ((unitmask & 0x0FUL) << 8);
+	/* bit 22 of the low control word is the counter enable */
+	*lo_val = enable ? *lo_val | (0x1UL << 22) : *lo_val & ~(0x1UL << 22);
+	*hi_val = (es_11_8 | ((es_13_12) << (29)));
+}
+
+/* assign df performance counters for read
+ * Gives @config a free counter slot.  If the config already owns a slot,
+ * *is_assigned is set to 1 and the call succeeds without side effects.
+ * Returns 0 on success, -ENOSPC when all counters are in use.
+ */
+int df_pmc_assign_cntr(struct amdgpu_device *adev,
+			uint64_t config,
+			int *is_assigned)
+{
+	struct event_mask *mask;
+	int target_cntr = -1;
+	int i;
+
+	*is_assigned = 0;
+
+	df_pmc_config_2_cntr(adev, config, &target_cntr);
+
+	/* already assigned: report it and keep the existing slot */
+	if (target_cntr >= 0) {
+		*is_assigned = 1;
+		return 0;
+	}
+
+	mask = container_of(adev, struct event_mask, gpu);
+
+	/* claim the first free slot (0 means unassigned) */
+	for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
+		if (mask->config_assign_mask[i] == 0ULL) {
+			mask->config_assign_mask[i] = config & 0x0FFFFFFUL;
+			return 0;
+		}
+	}
+
+	return -ENOSPC;
+}
+
+/* release performance counter
+ * Frees the counter slot assigned to @config, if any (no-op otherwise).
+ */
+void df_pmc_release_cntr(struct amdgpu_device *adev, uint64_t config)
+{
+	struct event_mask *mask;
+	int target_cntr = -1;
+
+	df_pmc_config_2_cntr(adev, config, &target_cntr);
+
+	mask = container_of(adev, struct event_mask, gpu);
+
+	if (target_cntr >= 0)
+		mask->config_assign_mask[target_cntr] = 0ULL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.h
new file mode 100644
index 000000000000..d0699c0b7a26
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df_pmc.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jonathan Kim <jonathan.kim at amd.com>
+ *
+ */
+
+#ifndef _AMDGPU_DF_PMC_H_
+#define _AMDGPU_DF_PMC_H_
+
+/* Performance Counters Registers for Vega20 */
+#define VEGA20_DF_PERFMONCTR_CTRL_BASE 0x01d440UL
+#define VEGA20_DF_PERFMONCTR_READ_BASE 0x01d448UL
+#define VEGA20_DF_PERFMONCTR_BASE_OFFSET 0x010UL
+#define VEGA20_DF_PERFMONCTR_HI_OFFSET 0x04UL
+
+/* Defined in global_features.h as FTI_PERFMON_VISIBLE */
+#define AMDGPU_DF_MAX_COUNTERS 4
+
+/* get flags from df perfmon config */
+#define GET_EVENT(x) (x & 0xFFUL)
+#define GET_INSTANCE(x) ((x >> 8) & 0xFFUL)
+#define GET_UNITMASK(x) ((x >> 16) & 0xFFUL)
+#define GET_ENABLE(x) ((x >> 24) & 0xFFUL)
+
+/* df event conf macros */
+#define IS_DF_XGMI_0_TX(x) (GET_EVENT(x) == 0x7 && GET_INSTANCE(x) == 0x46 && GET_UNITMASK(x) == 0x2)
+#define IS_DF_XGMI_1_TX(x) (GET_EVENT(x) == 0x7 && GET_INSTANCE(x) == 0x47 && GET_UNITMASK(x) == 0x2)
+
+
+void df_pmc_get_read_settings(struct amdgpu_device *adev,
+ uint64_t config,
+ uint32_t *lo_base_addr,
+ uint32_t *hi_base_addr);
+void df_pmc_get_ctrl_settings(struct amdgpu_device *adev,
+ uint64_t config,
+ uint32_t *lo_base_addr,
+ uint32_t *hi_base_addr,
+ uint32_t *lo_val,
+ uint32_t *hi_val);
+int df_pmc_assign_cntr(struct amdgpu_device *adev,
+ uint64_t config,
+ int *is_assigned);
+void df_pmc_release_cntr(struct amdgpu_device *adev,
+ uint64_t config);
+
+#endif /* _AMDGPU_DF_PMC_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
new file mode 100644
index 000000000000..3383c7d29a4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jonathan Kim <jonathan.kim at amd.com>
+ *
+ */
+
+#define pr_fmt(fmt) "perf/amdgpu_pmu: " fmt
+
+#include <linux/perf_event.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include "amdgpu.h"
+#include "amdgpu_pmu.h"
+#include "amdgpu_df_pmc.h"
+
+#define PMU_NAME_SIZE 32
+
+/* number of registered PMU instances per perf type; used to suffix names */
+static int amdgpu_pmu_node_count[PERF_TYPE_AMDGPU_MAX];
+
+/* per-device PMU bookkeeping; one entry per registered pmu instance */
+struct amdgpu_perf_status {
+	struct list_head list;		/* link in amdgpu_perf_status_list */
+	struct pmu pmu;			/* embedded perf pmu; container_of anchor */
+	struct amdgpu_device *gpu;	/* owning device */
+	int node_idx;			/* instance index for this perf type */
+	char name[PMU_NAME_SIZE];	/* registered name, e.g. "amdgpu_df_0" */
+	u8 max_counters;		/* number of hw counters exposed */
+	uint64_t cntr_assign_mask;	/* NOTE(review): never used in this patch */
+	raw_spinlock_t lock;		/* NOTE(review): initialized but never taken */
+};
+
+/* global list of all registered amdgpu PMU instances */
+static LIST_HEAD(amdgpu_perf_status_list);
+
+
+/*---------------------------------------------
+ * sysfs format attributes
+ *---------------------------------------------*/
+
+PMU_FORMAT_ATTR(df_event, "config:0-7");
+PMU_FORMAT_ATTR(df_instance, "config:8-15");
+PMU_FORMAT_ATTR(df_unitmask, "config:16-24");
+
+static struct attribute *amdgpu_pmu_format_attrs[] = {
+ &format_attr_df_event.attr,
+ &format_attr_df_instance.attr,
+ &format_attr_df_unitmask.attr,
+ NULL,
+};
+
+
+static struct attribute_group amdgpu_pmu_format_group = {
+ .name = "format",
+ .attrs = amdgpu_pmu_format_attrs,
+};
+
+
+/*---------------------------------------------
+ * sysfs events attributes
+ *---------------------------------------------*/
+
+
+static struct attribute_group amdgpu_pmu_events_group = {
+ .name = "events",
+};
+
+/* pairs a sysfs attribute with its human-readable event config string */
+struct AMDGPU_PMU_EVENT_DESC {
+	struct kobj_attribute attr;
+	const char *event;
+};
+
+/* sysfs "show" handler: prints the event's config description string */
+static ssize_t _pmu_event_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct AMDGPU_PMU_EVENT_DESC *event =
+		container_of(attr, struct AMDGPU_PMU_EVENT_DESC, attr);
+	return sprintf(buf, "%s\n", event->event);
+}
+
+/* build a read-only (0444) event-description attribute */
+#define AMDGPU_PMU_EVENT_DESC(_name, _event) \
+{ \
+	.attr = __ATTR(_name, 0444, _pmu_event_show, NULL), \
+	.event = _event, \
+}
+
+/* DF event descriptors: xGMI cake TX counters, links 0 and 1 (vega20) */
+static struct AMDGPU_PMU_EVENT_DESC amdgpu_vega20_df_event_descs[] = {
+	AMDGPU_PMU_EVENT_DESC(cake0_tx, "df_event=0x7,df_instance=0x46,df_unitmask=0x2"),
+	AMDGPU_PMU_EVENT_DESC(cake1_tx, "df_event=0x7,df_instance=0x47,df_unitmask=0x2"),
+	{ /* end: all zeroes */ },
+};
+
+
+/* Initialize PMU
+ * perf core event_init callback: claim the event if its type matches this
+ * pmu, and stash the raw config for the start/stop/read callbacks.
+ * NOTE(review): sampling events are not rejected here; confirm these
+ * counters support sampling or return -EINVAL for is_sampling_event().
+ */
+static int amdgpu_perf_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/* test the event attr type check for PMU enumeration */
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/* update the hw_perf_event struct with config data */
+	hwc->conf = event->attr.config;
+
+	return 0;
+}
+
+/* Start PMU
+ * perf core start callback: move the event out of STOPPED state and
+ * (re)enable the underlying xGMI link counter.  On PERF_EF_RELOAD the
+ * counter was already armed by amdgpu_perf_add(), so it is left running.
+ */
+static void amdgpu_perf_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int xgmi_tx_link;
+	struct amdgpu_device *gpu = container_of(event->pmu,
+			struct amdgpu_perf_status, pmu)->gpu;
+
+	/* event must be stopped before it can be started */
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+	hwc->state = 0;
+
+	switch (event->pmu->type) {
+	case PERF_TYPE_AMDGPU_DF:
+		/* map the raw config onto xgmi link 0/1, or -1 if neither */
+		xgmi_tx_link = IS_DF_XGMI_0_TX(hwc->conf) ? 0
+				: (IS_DF_XGMI_1_TX(hwc->conf) ? 1 : -1);
+
+		if (xgmi_tx_link >= 0) {
+			int is_enable = (flags & PERF_EF_RELOAD) ? 0 : 1;
+			/* if not a reload, (re)assign and enable the counter */
+			if (is_enable)
+				amdgpu_asic_start_xgmi_link_cntr(gpu, xgmi_tx_link, is_enable);
+		}
+
+		break;
+	}
+
+	perf_event_update_userpage(event);
+
+}
+
+/* Read PMU
+ * perf core read callback: sample the hw counter and fold the delta since
+ * the previous reading into the event count.  The cmpxchg guards against
+ * concurrent readers double-counting the same delta.
+ */
+static void amdgpu_perf_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct amdgpu_device *gpu = container_of(event->pmu,
+			struct amdgpu_perf_status, pmu)->gpu;
+	int xgmi_tx_link;
+	/* BUGFIX: count must be initialized -- if the config matches no
+	 * xgmi link (or the pmu type has no case) the original folded an
+	 * uninitialized stack value into the event count */
+	u64 count = 0, prev;
+
+	switch (event->pmu->type) {
+	case PERF_TYPE_AMDGPU_DF:
+		xgmi_tx_link = IS_DF_XGMI_0_TX(hwc->conf) ? 0
+				: (IS_DF_XGMI_1_TX(hwc->conf) ? 1 : -1);
+
+		if (xgmi_tx_link >= 0)
+			amdgpu_asic_get_xgmi_link_cntr(gpu, xgmi_tx_link,
+						       &count);
+		break;
+	default:
+		break;
+	}
+
+	prev = local64_read(&hwc->prev_count);
+	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
+		return;
+
+	local64_add(count - prev, &event->count);
+}
+
+/* Stop PMU
+ * perf core stop callback: pause the hw counter, mark the event STOPPED,
+ * then take one final reading before declaring the count up to date.
+ */
+static void amdgpu_perf_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct amdgpu_device *gpu = container_of(event->pmu,
+			struct amdgpu_perf_status, pmu)->gpu;
+	int xgmi_tx_link;
+
+	/* already stopped and up to date: nothing to do */
+	if (hwc->state & PERF_HES_UPTODATE)
+		return;
+
+	switch (event->pmu->type) {
+	case PERF_TYPE_AMDGPU_DF:
+		xgmi_tx_link = IS_DF_XGMI_0_TX(hwc->conf) ? 0
+				: (IS_DF_XGMI_1_TX(hwc->conf) ? 1 : -1);
+
+		/* pause only (is_disable = 1 here still just pauses via the
+		 * asic callback's disable path) */
+		if (xgmi_tx_link >= 0)
+			amdgpu_asic_stop_xgmi_link_cntr(gpu, xgmi_tx_link, 1);
+		break;
+	default:
+		break;
+	}
+
+	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+	hwc->state |= PERF_HES_STOPPED;
+
+	/* final reading; the original re-checked PERF_HES_UPTODATE here,
+	 * but that was dead code -- the early return above guarantees the
+	 * flag is still clear at this point */
+	amdgpu_perf_read(event);
+	hwc->state |= PERF_HES_UPTODATE;
+}
+
+/* Add PMU
+ * perf core add callback: reset and arm the hw counter for this event,
+ * then optionally start it when PERF_EF_START is set.
+ */
+static int amdgpu_perf_add(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct amdgpu_device *gpu = container_of(event->pmu,
+			struct amdgpu_perf_status, pmu)->gpu;
+	int xgmi_tx_link;
+	/* BUGFIX: retval was uninitialized when the config matched no xgmi
+	 * link (or the pmu type had no case), making the checks below read
+	 * garbage */
+	int retval = 0;
+
+	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	switch (event->pmu->type) {
+	case PERF_TYPE_AMDGPU_DF:
+		xgmi_tx_link = IS_DF_XGMI_0_TX(hwc->conf) ? 0
+				: (IS_DF_XGMI_1_TX(hwc->conf) ? 1 : -1);
+
+		if (xgmi_tx_link >= 0) {
+			amdgpu_asic_reset_xgmi_link_cntr(gpu, xgmi_tx_link);
+			retval = amdgpu_asic_start_xgmi_link_cntr(gpu,
+					xgmi_tx_link, 1);
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (retval)
+		return retval;
+
+	if (flags & PERF_EF_START)
+		amdgpu_perf_start(event, PERF_EF_RELOAD);
+
+	return 0;
+}
+
+/* Delete PMU
+ * perf core del callback: stop the event with a final count update
+ * (PERF_EF_UPDATE) and sync the user-visible mmap page.
+ */
+static void amdgpu_perf_del(struct perf_event *event, int flags)
+{
+	amdgpu_perf_stop(event, PERF_EF_UPDATE);
+	perf_event_update_userpage(event);
+}
+
+/* Initialize SYSFS attributes for AMDGPU PMU
+ * Builds the NULL-terminated attribute array for this asic's event
+ * descriptors and installs it in amdgpu_pmu_events_group.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int init_events_attrs(struct amdgpu_device *adev)
+{
+	int i = 0, j;
+	struct attribute **attrs;
+
+	/* count the asic's event descriptors (table is NULL-terminated) */
+	switch (adev->asic_type) {
+	case CHIP_VEGA20:
+		while (amdgpu_vega20_df_event_descs[i].attr.attr.name)
+			i++;
+		break;
+	default:
+		break;
+	}
+
+	/* sizeof(*attrs): element type is struct attribute *, not ** */
+	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
+	if (!attrs)
+		return -ENOMEM;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA20:
+		for (j = 0; j < i; j++)
+			attrs[j] = &amdgpu_vega20_df_event_descs[j].attr.attr;
+		break;
+	default:
+		break;
+	}
+
+	amdgpu_pmu_events_group.attrs = attrs;
+	return 0;
+}
+
+/* attribute groups exposed under /sys/bus/event_source/devices/<pmu>/ */
+const struct attribute_group *amdgpu_pmu_attr_groups[] = {
+	&amdgpu_pmu_format_group,
+	&amdgpu_pmu_events_group,
+	NULL,
+};
+
+
+/* template copied into each amdgpu_perf_status on registration.
+ * NOTE(review): __initconst data is discarded after kernel init, but
+ * init_df_pmu() copies this at device-probe time, which can run after
+ * init (deferred probe / hot-plug) -- verify this shouldn't simply be
+ * static const. */
+static const struct pmu amdgpu_pmu __initconst = {
+	.event_init = amdgpu_perf_event_init,
+	.add = amdgpu_perf_add,
+	.del = amdgpu_perf_del,
+	.start = amdgpu_perf_start,
+	.stop = amdgpu_perf_stop,
+	.read = amdgpu_perf_read,
+	.task_ctx_nr = perf_invalid_context,
+	.attr_groups = amdgpu_pmu_attr_groups,
+};
+
+/* Initialize Data Fabric PMU
+ * Allocates per-device status, copies the pmu template and registers it
+ * with perf core as "amdgpu_df_<n>".  Returns perf_pmu_register()'s
+ * result; the allocation is freed on registration failure.
+ */
+static int init_df_pmu(struct amdgpu_device *adev)
+{
+	struct amdgpu_perf_status *perf_status;
+	int ret;
+
+	/* dropped "struct cntr_assign *c_assign": the type is not declared
+	 * anywhere and the variable was never used (compile error) */
+	perf_status = kzalloc(sizeof(struct amdgpu_perf_status), GFP_KERNEL);
+	if (!perf_status)
+		return -ENOMEM;
+
+	raw_spin_lock_init(&perf_status->lock);
+
+	/* initialize pmu and counters assignments */
+	perf_status->pmu = amdgpu_pmu;
+	perf_status->gpu = adev;
+	perf_status->max_counters = AMDGPU_DF_MAX_COUNTERS;
+	perf_status->node_idx = amdgpu_pmu_node_count[PERF_TYPE_AMDGPU_DF];
+
+	snprintf(perf_status->name, PMU_NAME_SIZE, "amdgpu_df_%d",
+			amdgpu_pmu_node_count[PERF_TYPE_AMDGPU_DF]);
+
+	amdgpu_pmu_node_count[PERF_TYPE_AMDGPU_DF]++;
+
+	ret = perf_pmu_register(&perf_status->pmu, perf_status->name,
+				PERF_TYPE_AMDGPU_DF);
+	if (!ret) {
+		pr_info("Detected AMDGPU DF Counters. # of Counters = %d.\n",
+			perf_status->max_counters);
+		list_add_tail(&perf_status->list, &amdgpu_perf_status_list);
+	} else {
+		pr_warn("Error initializing AMDGPU DF PMUs.\n");
+		kfree(perf_status);
+	}
+
+	return ret;
+}
+
+/* Initialize all PMU Blocks - Only DF for now
+ * Returns 0 for asics with no PMU support (not an error), otherwise the
+ * result of the per-block init.
+ */
+static int init_pmu(struct amdgpu_device *adev)
+{
+	/* BUGFIX: retval was uninitialized (and returned) for any asic not
+	 * listed in the switch */
+	int retval = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+	case CHIP_VEGA20:
+		retval = init_df_pmu(adev);
+		break;
+	default:
+		break;
+	}
+
+	return retval;
+}
+
+
+/* initialize AMDGPU PMU
+ * Builds the sysfs event attributes then registers the PMU blocks.
+ * Returns 0 on success or a negative error code.
+ */
+int amdgpu_pmu_init(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ret = init_events_attrs(adev);
+	if (ret)
+		return ret;
+
+	ret = init_pmu(adev);
+	if (ret) {
+		/* free AND clear the attrs pointer so nothing can later see
+		 * a dangling allocation; propagate the real error instead
+		 * of masking it with -ENODEV */
+		kfree(amdgpu_pmu_events_group.attrs);
+		amdgpu_pmu_events_group.attrs = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
new file mode 100644
index 000000000000..d070d9e252ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jonathan Kim <jonathan.kim at amd.com>
+ *
+ */
+
+#ifndef _AMDGPU_PMU_H_
+#define _AMDGPU_PMU_H_
+
+enum amdgpu_pmu_perf_type {
+ PERF_TYPE_AMDGPU_DF = 0,
+ PERF_TYPE_AMDGPU_MAX
+};
+
+
+int amdgpu_pmu_init(struct amdgpu_device *adev);
+
+#endif /* _AMDGPU_PMU_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a136632bf91c..0153724b63f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -64,6 +64,8 @@
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
+#include "amdgpu_df_pmc.h"
+#include "amdgpu_pmu.h"
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
@@ -222,7 +224,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
void soc15_grbm_select(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 queue, u32 vmid)
+ u32 me, u32 pipe, u32 queue, u32 vmid)
{
u32 grbm_gfx_cntl = 0;
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
@@ -245,7 +247,7 @@ static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
}
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
- u8 *bios, u32 length_bytes)
+ u8 *bios, u32 length_bytes)
{
u32 *dw_ptr;
u32 i, length_dw;
@@ -334,7 +336,7 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
}
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 reg_offset, u32 *value)
+ u32 sh_num, u32 reg_offset, u32 *value)
{
uint32_t i;
struct soc15_allowed_register_entry *en;
@@ -367,8 +369,8 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
*/
void soc15_program_register_sequence(struct amdgpu_device *adev,
- const struct soc15_reg_golden *regs,
- const u32 array_size)
+ const struct soc15_reg_golden *regs,
+ const u32 array_size)
{
const struct soc15_reg_golden *entry;
u32 tmp, reg;
@@ -743,6 +745,143 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+/*
+* get xgmi link counters via programmable data fabric (df) counters (max 4)
+* using cake tx event.
+*
+* @adev -> amdgpu device
+* @instance-> currently cake has 2 links to poll on vega20
+* @count -> counters to pass
+*
+*/
+
+static void vega20_get_xgmi_link_cntr(struct amdgpu_device *adev,
+				  int instance,
+				  uint64_t *count)
+{
+	/* BUGFIX: initialize addrs -- df_pmc_get_read_settings() may return
+	 * without writing them, so the check below read uninitialized stack */
+	uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val, hi_val;
+	uint64_t config;
+
+	/* cake tx event 0x7, instance 0x46 + link, unitmask 0x2 */
+	config = 0ULL | (0x7ULL) | ((0x46ULL + instance) << 8) | (0x2 << 16);
+
+	df_pmc_get_read_settings(adev, config, &lo_base_addr, &hi_base_addr);
+
+	/* logical ||, not bitwise |, for the validity check */
+	if (lo_base_addr == 0 || hi_base_addr == 0)
+		return;
+
+	lo_val = RREG32_PCIE(lo_base_addr);
+	hi_val = RREG32_PCIE(hi_base_addr);
+
+	/* compose the 64-bit count from the hi/lo register pair */
+	*count = ((uint64_t)hi_val << 32) | lo_val;
+}
+
+/*
+* reset xgmi link counters
+*
+* @adev -> amdgpu device
+* @instance-> currently cake has 2 links to poll on vega20
+*
+*/
+static void vega20_reset_xgmi_link_cntr(struct amdgpu_device *adev,
+				    int instance)
+{
+	/* BUGFIX: initialize addrs -- the lookup may not write them */
+	uint32_t lo_base_addr = 0, hi_base_addr = 0;
+	uint64_t config;
+
+	/* cake tx event 0x7, instance 0x46 + link, unitmask 0x2 */
+	config = 0ULL | (0x7ULL) | ((0x46ULL + instance) << 8) | (0x2 << 16);
+
+	df_pmc_get_read_settings(adev, config, &lo_base_addr, &hi_base_addr);
+
+	/* logical ||, not bitwise |, for the validity check */
+	if (lo_base_addr == 0 || hi_base_addr == 0)
+		return;
+
+	/* zero both halves of the counter */
+	WREG32_PCIE(lo_base_addr, 0);
+	WREG32_PCIE(hi_base_addr, 0);
+}
+
+/*
+* start xgmi link counters
+*
+* @adev -> amdgpu device
+* @instance-> currently cake has 2 links to poll on vega20
+* @is_enable -> either resume or assign event via df perfmon
+*
+*/
+
+static int vega20_start_xgmi_link_cntr(struct amdgpu_device *adev,
+				   int instance, int is_enable)
+{
+	/* BUGFIX: ret was uninitialized (and returned) when !is_enable;
+	 * addrs are zeroed so the validity check below is meaningful */
+	int ret = 0, is_assigned;
+	uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val, hi_val;
+	uint64_t config;
+
+	if (instance < 0 || instance > 1)
+		return -EINVAL;
+
+	/* cake tx event 0x7, instance 0x46 + link, unitmask 0x2, enable */
+	config = 0ULL | (0x07ULL) | ((0x046ULL + instance) << 8)
+			| (0x02 << 16) | (0x1ULL << 24);
+
+	if (is_enable) {
+		ret = df_pmc_assign_cntr(adev, config, &is_assigned);
+		/* logical ||: bail on failure or if already assigned */
+		if (ret || is_assigned)
+			return ret;
+	}
+
+	df_pmc_get_ctrl_settings(adev,
+			config,
+			&lo_base_addr,
+			&hi_base_addr,
+			&lo_val,
+			&hi_val);
+
+	/* don't write to register 0 if no counter could be resolved */
+	if (lo_base_addr == 0 || hi_base_addr == 0)
+		return -EINVAL;
+
+	WREG32_PCIE(lo_base_addr, lo_val);
+	WREG32_PCIE(hi_base_addr, hi_val);
+
+	return ret;
+}
+
+/*
+* stop xgmi link counters
+*
+* @adev -> amdgpu device
+* @instance-> currently cake has 2 links to poll on vega20
+* @is_disable -> either pause or unassign event via df perfmon
+*
+*/
+
+static int vega20_stop_xgmi_link_cntr(struct amdgpu_device *adev,
+				  int instance, int is_disable)
+{
+	/* BUGFIX: initialize addrs -- the lookup may not write them, so
+	 * the check below read uninitialized stack in the original */
+	uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val;
+	uint64_t config;
+
+	/* cake tx event 0x7, instance 0x46 + link, unitmask 0x2 */
+	config = 0ULL | (0x7ULL) | ((0x46ULL + instance) << 8)
+			| (0x2 << 16);
+
+	if (is_disable) {
+		/* full teardown: clear the counter and release its slot */
+		vega20_reset_xgmi_link_cntr(adev, instance);
+		df_pmc_release_cntr(adev, config);
+	} else {
+		df_pmc_get_ctrl_settings(adev,
+				config,
+				&lo_base_addr,
+				&hi_base_addr,
+				NULL,
+				NULL);
+
+		/* logical ||, not bitwise |, for the validity check */
+		if (lo_base_addr == 0 || hi_base_addr == 0)
+			return -EINVAL;
+
+		/* pause: clear the enable bit (bit 22) in the low ctrl reg;
+		 * use 1U -- the 1ULL mask was truncated to 32 bits anyway */
+		lo_val = RREG32_PCIE(lo_base_addr);
+		WREG32_PCIE(lo_base_addr, lo_val & ~(1U << 22));
+	}
+
+	return 0;
+}
+
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -779,6 +918,10 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
+ .get_xgmi_link_cntr = &vega20_get_xgmi_link_cntr,
+ .start_xgmi_link_cntr = &vega20_start_xgmi_link_cntr,
+ .stop_xgmi_link_cntr = &vega20_stop_xgmi_link_cntr,
+ .reset_xgmi_link_cntr = &vega20_reset_xgmi_link_cntr
};
static int soc15_common_early_init(void *handle)
@@ -1144,7 +1287,7 @@ static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable
}
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
uint32_t def, data;
@@ -1162,7 +1305,7 @@ static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *ade
}
static int soc15_common_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+ enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1242,7 +1385,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
}
static int soc15_common_set_powergating_state(void *handle,
- enum amd_powergating_state state)
+ enum amd_powergating_state state)
{
/* todo */
return 0;
--
2.17.1
More information about the amd-gfx
mailing list