[PATCH] drm/amdgpu: add pmu counters
Alex Deucher
alexdeucher at gmail.com
Thu May 30 13:30:11 UTC 2019
On Wed, May 29, 2019 at 11:02 AM Kim, Jonathan <Jonathan.Kim at amd.com> wrote:
>
> add pmu counters to monitor amdgpu device performance.
> each registered pmu is recorded per pmu type per asic type.
>
> Change-Id: I8449f4ea824c411ee24a5b783ac066189b9de08e
> Signed-off-by: Jonathan Kim <jonathan.kim at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 394 +++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h | 37 ++
> 4 files changed, 437 insertions(+), 1 deletion(-)
> create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
> index 11a651ff7f0d..90d4c5d299dd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
> amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
> amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
> amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
> - amdgpu_vm_sdma.o
> + amdgpu_vm_sdma.o amdgpu_pmu.o
>
> # add asic specific block
> amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 582f5635fcb2..51f479b357a1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -61,6 +61,7 @@
>
> #include "amdgpu_xgmi.h"
> #include "amdgpu_ras.h"
> +#include "amdgpu_pmu.h"
>
> MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
> MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
> @@ -2748,6 +2749,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
> goto failed;
> }
>
> + r = amdgpu_pmu_init(adev);
> + if (r)
> + dev_err(adev->dev, "amdgpu_pmu_init failed\n");
> +
> /* must succeed. */
> amdgpu_ras_resume(adev);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> new file mode 100644
> index 000000000000..39cff772dd9e
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> @@ -0,0 +1,394 @@
> +/*
> + * Copyright 2019 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Author: Jonathan Kim <jonathan.kim at amd.com>
> + *
> + */
> +
> +#define pr_fmt(fmt) "perf/amdgpu_pmu: " fmt
> +
> +#include <linux/perf_event.h>
> +#include <linux/init.h>
> +#include <linux/cpumask.h>
> +#include <linux/slab.h>
> +#include "amdgpu.h"
> +#include "amdgpu_pmu.h"
> +#include "df_v3_6.h"
> +
> +#define PMU_NAME_SIZE 32
> +
/* record to keep track of pmu entry per pmu type per device */
struct amdgpu_pmu_entry {
	struct amdgpu_device *adev;	/* device whose counters this pmu exposes */
	struct pmu pmu;			/* embedded perf pmu registered with the perf core */
	unsigned int pmu_perf_type;	/* enum amdgpu_pmu_perf_type (e.g. PERF_TYPE_AMDGPU_DF) */
};
> +
> +PMU_FORMAT_ATTR(df_event, "config:0-7");
> +PMU_FORMAT_ATTR(df_instance, "config:8-15");
> +PMU_FORMAT_ATTR(df_unitmask, "config:16-23");
> +
> +/* df format attributes */
> +static struct attribute *amdgpu_df_format_attrs[] = {
> + &format_attr_df_event.attr,
> + &format_attr_df_instance.attr,
> + &format_attr_df_unitmask.attr,
> + NULL
> +};
> +
> +/* df format attribute group */
> +static struct attribute_group amdgpu_df_format_attr_group = {
> + .name = "format",
> + .attrs = amdgpu_df_format_attrs,
> +};
> +
> +/* df event attribute group */
> +static struct attribute_group amdgpu_df_events_attr_group = {
> + .name = "events",
> +};
> +
/* pairs a sysfs attribute with the config string shown for a named event */
struct AMDGPU_PMU_EVENT_DESC {
	struct kobj_attribute attr;	/* sysfs attribute under <pmu>/events/ */
	const char *event;		/* config string, e.g. "df_event=0x7,..." */
};
> +
> +static ssize_t _pmu_event_show(struct kobject *kobj,
> + struct kobj_attribute *attr, char *buf)
> +{
> + struct AMDGPU_PMU_EVENT_DESC *event =
> + container_of(attr, struct AMDGPU_PMU_EVENT_DESC, attr);
> + return sprintf(buf, "%s\n", event->event);
> +};
> +
> +#define AMDGPU_PMU_EVENT_DESC(_name, _event) \
> +{ \
> + .attr = __ATTR(_name, 0444, _pmu_event_show, NULL), \
> + .event = _event, \
> +}
> +
/*
 * vega20 df events
 *
 * Zero-terminated table of named data-fabric events; each entry maps a
 * human-readable event name to the df_event/df_instance/df_unitmask config
 * fields defined by the "format" attribute group above.
 */
static struct AMDGPU_PMU_EVENT_DESC amdgpu_vega20_df_event_descs[] = {
	AMDGPU_PMU_EVENT_DESC(cake0_pcsout_txdata,
			      "df_event=0x7,df_instance=0x46,df_unitmask=0x2"),
	AMDGPU_PMU_EVENT_DESC(cake1_pcsout_txdata,
			      "df_event=0x7,df_instance=0x47,df_unitmask=0x2"),
	AMDGPU_PMU_EVENT_DESC(cake0_pcsout_txmeta,
			      "df_event=0x7,df_instance=0x46,df_unitmask=0x4"),
	AMDGPU_PMU_EVENT_DESC(cake1_pcsout_txmeta,
			      "df_event=0x7,df_instance=0x47,df_unitmask=0x4"),
	AMDGPU_PMU_EVENT_DESC(cake0_ftiinstat_reqalloc,
			      "df_event=0xb,df_instance=0x46,df_unitmask=0x4"),
	AMDGPU_PMU_EVENT_DESC(cake1_ftiinstat_reqalloc,
			      "df_event=0xb,df_instance=0x47,df_unitmask=0x4"),
	AMDGPU_PMU_EVENT_DESC(cake0_ftiinstat_rspalloc,
			      "df_event=0xb,df_instance=0x46,df_unitmask=0x8"),
	AMDGPU_PMU_EVENT_DESC(cake1_ftiinstat_rspalloc,
			      "df_event=0xb,df_instance=0x47,df_unitmask=0x8"),
	{ /* end with zeros */ },
};
> +
> +/* df attr group */
> +const struct attribute_group *amdgpu_df_attr_groups[] = {
> + &amdgpu_df_format_attr_group,
> + &amdgpu_df_events_attr_group,
> + NULL
> +};
> +
> +
> +/* initialize perf counter */
> +static int amdgpu_perf_event_init(struct perf_event *event)
> +{
> + struct hw_perf_event *hwc = &event->hw;
> +
> + /* test the event attr type check for PMU enumeration */
> + if (event->attr.type != event->pmu->type)
> + return -ENOENT;
> +
> + /* update the hw_perf_event struct with config data */
> + hwc->conf = event->attr.config;
> +
> + return 0;
> +}
> +
/*
 * start perf counter
 *
 * Transitions the event out of the STOPPED state and starts the backing
 * hardware counter. Called by the perf core with the context locked.
 */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						  struct amdgpu_pmu_entry,
						  pmu);

	/* perf core guarantees start is only called on a stopped event */
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * NOTE(review): third pmc_start() arg of 1 appears to (re)allocate/
	 * initialize the counter and 0 to start counting (see df_v3_6
	 * pmc_start) -- confirm against the df implementation.
	 */
	if (!(flags & PERF_EF_RELOAD))
		pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);

	pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);

	perf_event_update_userpage(event);

}
> +
> +/* read perf counter */
> +static void amdgpu_perf_read(struct perf_event *event)
> +{
> + struct hw_perf_event *hwc = &event->hw;
> + struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> + struct amdgpu_pmu_entry,
> + pmu);
> +
> + u64 count, prev;
> +
> + switch (pe->pmu_perf_type) {
> + case PERF_TYPE_AMDGPU_DF:
> + pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf, &count);
> + default:
> + count = 0;
> + break;
> + };
> +
> + prev = local64_read(&hwc->prev_count);
> + if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
> + return;
> +
> + local64_add(count - prev, &event->count);
> +}
> +
> +/* stop perf counter */
> +static void amdgpu_perf_stop(struct perf_event *event, int flags)
> +{
> + struct hw_perf_event *hwc = &event->hw;
> + struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> + struct amdgpu_pmu_entry,
> + pmu);
> +
> + if (hwc->state & PERF_HES_UPTODATE)
> + return;
> +
> + switch (pe->pmu_perf_type) {
> + case PERF_TYPE_AMDGPU_DF:
> + pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
> + break;
> + default:
> + break;
> + };
> +
> + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
> + hwc->state |= PERF_HES_STOPPED;
> +
> + if (hwc->state & PERF_HES_UPTODATE)
> + return;
> +
> + amdgpu_perf_read(event);
> + hwc->state |= PERF_HES_UPTODATE;
> +}
> +
> +/* add perf counter */
> +static int amdgpu_perf_add(struct perf_event *event, int flags)
> +{
> +
> + struct hw_perf_event *hwc = &event->hw;
> + int retval;
> +
> + struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> + struct amdgpu_pmu_entry,
> + pmu);
> +
> + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
> +
> + switch (pe->pmu_perf_type) {
> + case PERF_TYPE_AMDGPU_DF:
> + retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
> + break;
> + default:
> + return 0;
> + };
> +
> + if (retval)
> + return retval;
> +
> + if (flags & PERF_EF_START)
> + amdgpu_perf_start(event, PERF_EF_RELOAD);
> +
> + return retval;
> +
> +}
> +
> +/* delete perf counter */
> +static void amdgpu_perf_del(struct perf_event *event, int flags)
> +{
> + struct hw_perf_event *hwc = &event->hw;
> +
> + struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> + struct amdgpu_pmu_entry,
> + pmu);
> +
> + amdgpu_perf_stop(event, PERF_EF_UPDATE);
> +
> + switch (pe->pmu_perf_type) {
> + case PERF_TYPE_AMDGPU_DF:
> + pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
> + break;
> + default:
> + break;
> + };
> +
> + perf_event_update_userpage(event);
> +}
> +
/* PMUs - pmus to register per pmu type per asic
 *
 * df_pmu - data fabric
 *
 * Template copied into each per-device amdgpu_pmu_entry before
 * perf_pmu_register(); task_ctx_nr = perf_invalid_context makes this a
 * system-wide (uncore-style) PMU with no per-task contexts.
 */
static const struct pmu df_pmu __initconst = {
	.event_init = amdgpu_perf_event_init,
	.add = amdgpu_perf_add,
	.del = amdgpu_perf_del,
	.start = amdgpu_perf_start,
	.stop = amdgpu_perf_stop,
	.read = amdgpu_perf_read,
	.task_ctx_nr = perf_invalid_context,
	.attr_groups = amdgpu_df_attr_groups,
};
> +
> +/* initialize event attrs per pmu type per asic */
> +static int amdgpu_pmu_set_attributes(struct amdgpu_device *adev,
> + unsigned int perf_type)
> +{
> + struct attribute **attrs;
> + struct AMDGPU_PMU_EVENT_DESC *pmu_event_descs;
> + int i, j;
> +
> + switch (perf_type) {
> + case PERF_TYPE_AMDGPU_DF:
> +
> + switch (adev->asic_type) {
> + case CHIP_VEGA20:
> + pmu_event_descs = amdgpu_vega20_df_event_descs;
> + break;
> + default:
> + return -EINVAL;
> + };
> +
> + i = 0;
> + while (pmu_event_descs[i].attr.attr.name)
> + i++;
> +
> + attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
> +
> + if (!attrs)
> + return -ENOMEM;
> +
> + for (j = 0; j < i; j++)
> + attrs[j] = &pmu_event_descs[j].attr.attr;
> +
> + amdgpu_df_events_attr_group.attrs = attrs;
This is a global variable. You'll need to make it per device or it
won't work for multiple GPUs. As Felix suggested, hang them off of
adev somewhere.
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> + return 0;
> +}
> +
> +/* init pmu by type and keep record of pmu entry per asic */
> +static int init_pmu_by_type(struct amdgpu_device *adev,
> + unsigned int pmu_type,
> + char *pmu_type_name,
> + char *pmu_file_name)
> +{
> + char pmu_name[PMU_NAME_SIZE];
> + struct amdgpu_pmu_entry *pmu_entry;
> + int ret;
> +
> + pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
> +
> + if (!pmu_entry)
> + return -ENOMEM;
> +
> + ret = amdgpu_pmu_set_attributes(adev, pmu_type);
> + if (ret)
> + return ret;
> +
> + switch (pmu_type) {
> + case PERF_TYPE_AMDGPU_DF:
> + pmu_entry->pmu = df_pmu;
> + break;
> + default:
> + kfree(pmu_entry);
> + return -EINVAL;
> + };
> +
> + pmu_entry->adev = adev;
> + pmu_entry->pmu_perf_type = pmu_type;
> +
> + snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
> + pmu_file_name, adev->ddev->primary->index);
> +
> + ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
> +
> + if (!ret) {
> + pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
> + pmu_type_name, AMDGPU_DF_MAX_COUNTERS);
> + } else {
> + kfree(pmu_entry);
> + pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name);
> + }
> +
> + return ret;
> +}
> +
> +/* initialize pmu per asic per pmu type */
> +static int init_pmu(struct amdgpu_device *adev)
> +{
> + int retval = 0;
> +
> + switch (adev->asic_type) {
> + case CHIP_VEGA20:
> + retval = init_pmu_by_type(adev, PERF_TYPE_AMDGPU_DF,
> + "DF", "amdgpu_df");
> + break;
> + default: /* ignore all other chips */
> + break;
> + }
> +
> + return retval;
> +}
> +
> +
> +/* initialize amdgpu pmu */
> +int amdgpu_pmu_init(struct amdgpu_device *adev)
> +{
> + int ret;
> +
> + ret = init_pmu(adev);
> +
> + if (ret)
> + return -ENODEV;
> +
> + return ret;
> +}
Is there a reason to not just squash init_pmu() into
amdgpu_pmu_init()? Also we need a function (e.g., amdgpu_pmu_fini())
to tear stuff down on driver unload.
Alex
> +
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
> new file mode 100644
> index 000000000000..d070d9e252ff
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
> @@ -0,0 +1,37 @@
> +/*
> + * Copyright 2019 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Author: Jonathan Kim <jonathan.kim at amd.com>
> + *
> + */
> +
> +#ifndef _AMDGPU_PMU_H_
> +#define _AMDGPU_PMU_H_
> +
/* pmu types handled by the amdgpu pmu layer, one registered pmu per type */
enum amdgpu_pmu_perf_type {
	PERF_TYPE_AMDGPU_DF = 0,	/* data fabric counters */
	PERF_TYPE_AMDGPU_MAX
};
> +
> +
> +int amdgpu_pmu_init(struct amdgpu_device *adev);
> +
> +#endif /* _AMDGPU_PMU_H_ */
> --
> 2.17.1
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
More information about the amd-gfx
mailing list