[PATCH] drm/amdgpu: add pmu counters

Kuehling, Felix Felix.Kuehling at amd.com
Wed Jun 19 00:40:02 UTC 2019


On 2019-06-18 17:34, Kim, Jonathan wrote:
> v2: fixed missing break in switch statement
>
> add pmu counters
>
> Change-Id: I1aca271fd12cabce0ccfc076f771cde2d4cadd54
> Signed-off-by: Jonathan Kim <Jonathan.Kim at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |   3 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   6 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c    | 302 +++++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h    |  37 +++
>   5 files changed, 349 insertions(+), 1 deletion(-)
>   create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
>   create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
> index 57ce44cc3226..4c9fd2645f64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>   	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
>   	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
>   	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
> -	amdgpu_vm_sdma.o
> +	amdgpu_vm_sdma.o amdgpu_pmu.o
>   
>   # add asic specific block
>   amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 4946d9ecc3e3..8ef2ac59ff04 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -962,6 +962,9 @@ struct amdgpu_device {
>   	long				compute_timeout;
>   
>   	uint64_t			unique_id;
> +
> +	/* spin lock for pmu via perf events */
> +	raw_spinlock_t	pmu_lock;

What are you protecting with this spinlock? As far as I can tell, the 
perf core already does its own locking around the pmu callbacks, so I'm 
wondering whether this is really needed.

It's also the first time I've come across a raw_spinlock in our driver, 
and it makes me suspicious. Raw spinlocks are for low-level core code 
that must never sleep, even on preempt-rt; the driver shouldn't be in 
the business of doing anything that requires one.
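
If the core locking is indeed sufficient, the callbacks would simply 
shrink. For example, the stop callback could reduce to something like 
this (a sketch under that assumption, untested, and assuming nothing 
outside the perf callbacks touches the same counters concurrently):

     static void amdgpu_perf_stop(struct perf_event *event, int flags)
     {
         struct hw_perf_event *hwc = &event->hw;
         struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                    struct amdgpu_pmu_entry,
                                                    pmu);

         if (hwc->state & PERF_HES_UPTODATE)
             return;

         /* no driver-side lock: the perf core serializes these callbacks */
         switch (pe->pmu_perf_type) {
         case PERF_TYPE_AMDGPU_DF:
             pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
             break;
         default:
             break;
         }

         WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
         hwc->state |= PERF_HES_STOPPED;

         amdgpu_perf_read(event);
         hwc->state |= PERF_HES_UPTODATE;
     }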


>   };
>   
>   static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 9d1b8d646661..7663c5d4d0e1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -61,6 +61,7 @@
>   
>   #include "amdgpu_xgmi.h"
>   #include "amdgpu_ras.h"
> +#include "amdgpu_pmu.h"
>   
>   MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
>   MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
> @@ -2746,6 +2747,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>   		return r;
>   	}
>   
> +	r = amdgpu_pmu_init(adev);
> +	if (r)
> +		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
> +
>   	return 0;
>   
>   failed:
> @@ -2814,6 +2819,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
>   	amdgpu_debugfs_regs_cleanup(adev);
>   	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
>   	amdgpu_ucode_sysfs_fini(adev);
> +	amdgpu_pmu_fini(adev);
>   }
>   
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> new file mode 100644
> index 000000000000..cd45095bd815
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> @@ -0,0 +1,302 @@
> +/*
> + * Copyright 2019 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Author: Jonathan Kim <jonathan.kim at amd.com>
> + *
> + */
> +
> +#include <linux/perf_event.h>
> +#include <linux/init.h>
> +#include "amdgpu.h"
> +#include "amdgpu_pmu.h"
> +#include "df_v3_6.h"
> +
> +#define PMU_NAME_SIZE 32
> +
> +/* record to keep track of pmu entry per pmu type per device */
> +struct amdgpu_pmu_entry {
> +	struct list_head entry;
> +	struct amdgpu_device *adev;
> +	struct pmu pmu;
> +	unsigned int pmu_perf_type;
> +};
> +
> +static LIST_HEAD(amdgpu_pmu_list);
> +
> +
> +/* initialize perf counter */
> +static int amdgpu_perf_event_init(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +
> +	/* test the event attr type check for PMU enumeration */
> +	if (event->attr.type != event->pmu->type)
> +		return -ENOENT;
> +
> +	/* update the hw_perf_event struct with config data */
> +	hwc->conf = event->attr.config;
> +
> +	return 0;
> +}
> +
> +/* start perf counter */
> +static void amdgpu_perf_start(struct perf_event *event, int flags)
> +{
> +	unsigned long lflags;
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> +						  struct amdgpu_pmu_entry,
> +						  pmu);
> +
> +	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
> +		return;
> +
> +	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
> +	hwc->state = 0;
> +
> +	raw_spin_lock_irqsave(&pe->adev->pmu_lock, lflags);
> +
> +	if (!(flags & PERF_EF_RELOAD))
> +		pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);

Should there be a switch-case around the call to df functions?
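
I.e., matching the switch in the other callbacks, something like:

     switch (pe->pmu_perf_type) {
     case PERF_TYPE_AMDGPU_DF:
         if (!(flags & PERF_EF_RELOAD))
             pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
         pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
         break;
     default:
         break;
     }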


> +
> +	pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
> +
> +	raw_spin_unlock_irqrestore(&pe->adev->pmu_lock, lflags);
> +
> +	perf_event_update_userpage(event);
> +
> +}
> +
> +/* read perf counter */
> +static void amdgpu_perf_read(struct perf_event *event)
> +{
> +	unsigned long lflags;

"lflags" looks suspiciously like Hungarian notation, which Linux kernel 
developers tend to hate. I think you can just call this "flags".


> +	struct hw_perf_event *hwc = &event->hw;
> +	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> +						  struct amdgpu_pmu_entry,
> +						  pmu);
> +
> +	u64 count, prev;
> +
> +	raw_spin_lock_irqsave(&pe->adev->pmu_lock, lflags);
> +
> +	switch (pe->pmu_perf_type) {

Instead of registering a separate PMU for each event type, can we use a 
few unused bits in the hwc->conf to distinguish event types and register 
only a single PMU per GPU? Then we could also embed struct pmu directly 
in struct amdgpu_device and wouldn't need the struct amdgpu_pmu_entry 
wrapper.

Then this switch would be something like the following, with adev 
obtained via container_of(event->pmu, struct amdgpu_device, pmu) 
instead of going through the pe wrapper:

     switch (AMDGPU_PMU_EVENT_TYPE(hwc->conf)) {
     case AMDGPU_PERF_TYPE_DF:
         adev->df_funcs->pmc_get_count(adev, hwc->conf, &count);
         break;
     default:
         break;
     }
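
AMDGPU_PMU_EVENT_TYPE would be a new macro. One hypothetical encoding, 
assuming the actual DF event configs leave the top bits of attr.config 
unused, could be:

     /* hypothetical: reserve the top 4 bits of attr.config for the type */
     #define AMDGPU_PMU_EVENT_TYPE_SHIFT    60
     #define AMDGPU_PMU_EVENT_TYPE_MASK     0xfULL
     #define AMDGPU_PMU_EVENT_TYPE(conf)                               \
         (((conf) >> AMDGPU_PMU_EVENT_TYPE_SHIFT) &                    \
          AMDGPU_PMU_EVENT_TYPE_MASK)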


> +	case PERF_TYPE_AMDGPU_DF:
> +		pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf, &count);
> +		break;
> +	default:
> +		count = 0;
> +		break;
> +	};
> +
> +	pr_info("event %x has count %d\n", hwc->conf, count);
> +
> +	raw_spin_unlock_irqrestore(&pe->adev->pmu_lock, lflags);
> +
> +	prev = local64_read(&hwc->prev_count);
> +	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
> +		return;

This looks like you copied it from somewhere else; I see similar code 
in other perf drivers, but I don't think you got it quite right.

The intent seems to be to protect against multiple threads updating 
hwc->prev_count and event->count concurrently, so that only one thread 
wins the race and updates both. But you're doing this under a spinlock, 
which already prevents this code from running concurrently. Like I said 
above, I believe the lock is not necessary; the cmpxchg logic is the 
lockless way of ensuring that only one thread updates both variables.

Other pmu drivers have retry logic here for the case where the cmpxchg 
fails; that retry is missing in your version.

Also, for this to be effective, the local64_read must come before the 
df_funcs->pmc_get_count call. Otherwise another thread could update 
hwc->prev_count and event->count with a newer, higher value after you 
called pmc_get_count, and your update would then write back an older 
counter value.
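
Putting both together, the usual lockless read pattern in other pmu 
drivers looks roughly like the following sketch (adapted to this 
patch, untested):

     static void amdgpu_perf_read(struct perf_event *event)
     {
         struct hw_perf_event *hwc = &event->hw;
         struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                    struct amdgpu_pmu_entry,
                                                    pmu);
         u64 count, prev;

         do {
             /* snapshot prev_count before reading the hardware counter */
             prev = local64_read(&hwc->prev_count);

             switch (pe->pmu_perf_type) {
             case PERF_TYPE_AMDGPU_DF:
                 pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
                                                   &count);
                 break;
             default:
                 count = 0;
                 break;
             }
             /* retry if another thread updated prev_count meanwhile */
         } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

         local64_add(count - prev, &event->count);
     }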

Regards,
   Felix


> +
> +	local64_add(count - prev, &event->count);
> +}
> +
> +/* stop perf counter */
> +static void amdgpu_perf_stop(struct perf_event *event, int flags)
> +{
> +	unsigned long lflags;
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> +						  struct amdgpu_pmu_entry,
> +						  pmu);
> +
> +	if (hwc->state & PERF_HES_UPTODATE)
> +		return;
> +
> +	raw_spin_lock_irqsave(&pe->adev->pmu_lock, lflags);
> +
> +	switch (pe->pmu_perf_type) {
> +	case PERF_TYPE_AMDGPU_DF:
> +		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
> +		break;
> +	default:
> +		break;
> +	};
> +
> +	raw_spin_unlock_irqrestore(&pe->adev->pmu_lock, lflags);
> +
> +	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
> +	hwc->state |= PERF_HES_STOPPED;
> +
> +	if (hwc->state & PERF_HES_UPTODATE)
> +		return;
> +
> +	amdgpu_perf_read(event);
> +	hwc->state |= PERF_HES_UPTODATE;
> +}
> +
> +/* add perf counter  */
> +static int amdgpu_perf_add(struct perf_event *event, int flags)
> +{
> +	unsigned long lflags;
> +	struct hw_perf_event *hwc = &event->hw;
> +	int retval;
> +
> +	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> +						  struct amdgpu_pmu_entry,
> +						  pmu);
> +
> +	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
> +
> +	raw_spin_lock_irqsave(&pe->adev->pmu_lock, lflags);
> +
> +	switch (pe->pmu_perf_type) {
> +	case PERF_TYPE_AMDGPU_DF:
> +		retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
> +		break;
> +	default:
> +		return 0;
> +	};
> +
> +	raw_spin_unlock_irqrestore(&pe->adev->pmu_lock, lflags);
> +
> +	if (retval)
> +		return retval;
> +
> +	if (flags & PERF_EF_START)
> +		amdgpu_perf_start(event, PERF_EF_RELOAD);
> +
> +	return retval;
> +
> +}
> +
> +/* delete perf counter  */
> +static void amdgpu_perf_del(struct perf_event *event, int flags)
> +{
> +	unsigned long lflags;
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
> +						  struct amdgpu_pmu_entry,
> +						  pmu);
> +
> +	amdgpu_perf_stop(event, PERF_EF_UPDATE);
> +
> +	raw_spin_lock_irqsave(&pe->adev->pmu_lock, lflags);
> +
> +	switch (pe->pmu_perf_type) {
> +	case PERF_TYPE_AMDGPU_DF:
> +		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
> +		break;
> +	default:
> +		break;
> +	};
> +
> +	raw_spin_unlock_irqrestore(&pe->adev->pmu_lock, lflags);
> +
> +	perf_event_update_userpage(event);
> +}
> +
> +/* vega20 pmus */
> +
> +/* init pmu tracking per pmu type */
> +int init_pmu_by_type(struct amdgpu_device *adev,
> +		  const struct attribute_group *attr_groups[],
> +		  char *pmu_type_name, char *pmu_file_prefix,
> +		  unsigned int pmu_perf_type)
> +{
> +	char pmu_name[PMU_NAME_SIZE];
> +	struct amdgpu_pmu_entry *pmu_entry;
> +	int ret = 0;
> +
> +	pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
> +
> +	if (!pmu_entry)
> +		return -ENOMEM;
> +
> +	pmu_entry->adev = adev;
> +	pmu_entry->pmu = (struct pmu){
> +		.event_init = amdgpu_perf_event_init,
> +		.add = amdgpu_perf_add,
> +		.del = amdgpu_perf_del,
> +		.start = amdgpu_perf_start,
> +		.stop = amdgpu_perf_stop,
> +		.read = amdgpu_perf_read,
> +		.task_ctx_nr = perf_invalid_context,
> +	};
> +
> +	pmu_entry->pmu.attr_groups = attr_groups;
> +	pmu_entry->pmu_perf_type = pmu_perf_type;
> +	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
> +				pmu_file_prefix, adev->ddev->primary->index);
> +
> +	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
> +
> +	if (ret) {
> +		kfree(pmu_entry);
> +		pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name);
> +		return ret;
> +	}
> +
> +	raw_spin_lock_init(&pmu_entry->adev->pmu_lock);
> +
> +	pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
> +			pmu_type_name, DF_V3_6_MAX_COUNTERS);
> +
> +	list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);
> +
> +	return 0;
> +}
> +
> +/* init amdgpu_pmu */
> +int amdgpu_pmu_init(struct amdgpu_device *adev)
> +{
> +
> +	int ret = 0;
> +
> +	switch (adev->asic_type) {
> +	case CHIP_VEGA20:
> +		/* init df */
> +		ret = init_pmu_by_type(adev, df_v3_6_attr_groups,
> +				"DF", "amdgpu_df", PERF_TYPE_AMDGPU_DF);
> +
> +		/* other pmu types go here*/
> +		break;
> +	default:
> +		return 0;
> +	}
> +
> +	return 0;
> +
> +}
> +
> +
> +/* destroy all pmu data associated with target device */
> +void amdgpu_pmu_fini(struct amdgpu_device *adev)
> +{
> +	struct amdgpu_pmu_entry *pe, *temp;
> +
> +	list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
> +		if (pe->adev == adev) {
> +			list_del(&pe->entry);
> +			perf_pmu_unregister(&temp->pmu);
> +			kfree(temp);
> +		}
> +	}
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
> new file mode 100644
> index 000000000000..7dddb7160a11
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
> @@ -0,0 +1,37 @@
> +/*
> + * Copyright 2019 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Author: Jonathan Kim <jonathan.kim at amd.com>
> + *
> + */
> +
> +#ifndef _AMDGPU_PMU_H_
> +#define _AMDGPU_PMU_H_
> +
> +enum amdgpu_pmu_perf_type {
> +	PERF_TYPE_AMDGPU_DF = 0,
> +	PERF_TYPE_AMDGPU_MAX
> +};
> +
> +int amdgpu_pmu_init(struct amdgpu_device *adev);
> +void amdgpu_pmu_fini(struct amdgpu_device *adev);
> +
> +#endif /* _AMDGPU_PMU_H_ */

