[PATCH 6/6] drm/xe/vsec: Support BMG devices
Ruhl, Michael J
michael.j.ruhl at intel.com
Fri May 31 15:42:04 UTC 2024
>-----Original Message-----
>From: David E. Box <david.e.box at linux.intel.com>
>Sent: Thursday, May 30, 2024 5:07 PM
>To: Ruhl, Michael J <michael.j.ruhl at intel.com>; intel-xe at lists.freedesktop.org
>Subject: Re: [PATCH 6/6] drm/xe/vsec: Support BMG devices
>
>On Fri, 2024-05-10 at 16:59 -0400, Michael J. Ruhl wrote:
>> Utilize the PMT callback API to add support for the BMG
>> devices.
>>
>> Signed-off-by: Michael J. Ruhl <michael.j.ruhl at intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_device.c | 2 +
>> drivers/gpu/drm/xe/xe_device_types.h | 5 +
>> drivers/gpu/drm/xe/xe_vsec.c | 145 +++++++++++++++++++++++++--
>> drivers/platform/x86/intel/vsec.c | 2 +-
>> 4 files changed, 146 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
>> index e77768bc4471..940f4cf0274a 100644
>> --- a/drivers/gpu/drm/xe/xe_device.c
>> +++ b/drivers/gpu/drm/xe/xe_device.c
>> @@ -315,6 +315,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
>> goto err;
>> }
>>
>> + drmm_mutex_init(&xe->drm, &xe->pmt.lock);
>> +
>> err = xe_display_create(xe);
>> if (WARN_ON(err))
>> goto err;
>> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
>> index 0af739981ebf..f451216c2283 100644
>> --- a/drivers/gpu/drm/xe/xe_device_types.h
>> +++ b/drivers/gpu/drm/xe/xe_device_types.h
>> @@ -448,6 +448,11 @@ struct xe_device {
>> struct mutex lock;
>> } d3cold;
>>
>> + struct {
>> + /** @pmt.lock: protect access for telemetry data */
>> + struct mutex lock;
>> + } pmt;
>> +
>> /**
>> * @pm_callback_task: Track the active task that is running in either
>> * the runtime_suspend or runtime_resume callbacks.
>> diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
>> index a91aec49d04a..ac840a1e20a4 100644
>> --- a/drivers/gpu/drm/xe/xe_vsec.c
>> +++ b/drivers/gpu/drm/xe/xe_vsec.c
>> @@ -5,9 +5,12 @@
>> #include <linux/intel_vsec.h>
>> #include <linux/pci.h>
>>
>> +#include "xe_device.h"
>> #include "xe_device_types.h"
>> #include "xe_drv.h"
>> +#include "xe_mmio.h"
>> #include "xe_platform_types.h"
>> +#include "xe_pm.h"
>> #include "xe_vsec.h"
>>
>> #define SOC_BASE 0x280000
>> @@ -15,6 +18,10 @@
>> /* from drivers/platform/x86/intel/pmt/telemetry.c */
>> #define TELEM_BASE_OFFSET 0x8
>>
>> +/* Decode the guid information */
>> +#define GUID_RECORD_MASK GENMASK(1, 0)
>> +#define GUID_CAP_TYPE GENMASK(3, 2)
>> +
>> #define DG2_PMT_BASE 0xE8000
>> #define DG2_DISCOVERY_START 0x6000
>> #define DG2_TELEM_START 0x4000
>> @@ -22,8 +29,18 @@
>> #define DG2_DISCOVERY_OFFSET (SOC_BASE + DG2_PMT_BASE + DG2_DISCOVERY_START)
>> #define DG2_TELEM_OFFSET (SOC_BASE + DG2_PMT_BASE + DG2_TELEM_START)
>>
>> +#define BMG_PMT_BASE 0xDB000
>> +#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE)
>> +
>> +#define BMG_TELEMETRY_BASE 0xE0000
>> +#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE)
>> +
>> #define GFX_BAR 0
>>
>> +#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
>> +#define SG_REMAP_ACCESS(_mem) ((_mem) << 24)
>> +#define SG_REMAP_BITS GENMASK(31, 24)
>> +
>> static struct intel_vsec_header dg2_telemetry = {
>> .length = 0x10,
>> .id = VSEC_ID_TELEMETRY,
>> @@ -38,12 +55,106 @@ static struct intel_vsec_header *dg2_capabilities[] = {
>> NULL
>> };
>>
>> -static struct intel_vsec_platform_info dg2_vsec_info = {
>> - .caps = VSEC_CAP_TELEMETRY,
>> - .headers = dg2_capabilities,
>> - .quirks = VSEC_QUIRK_EARLY_HW | VSEC_QUIRK_P2SB_OFFSET,
>> +static struct intel_vsec_header bmg_telemetry = {
>> + .length = 0x10,
>> + .id = VSEC_ID_TELEMETRY,
>> + .num_entries = 2,
>> + .entry_size = 4,
>> + .tbir = GFX_BAR,
>> + .offset = BMG_DISCOVERY_OFFSET,
>> +};
>> +
>> +static struct intel_vsec_header *bmg_capabilities[] = {
>> + &bmg_telemetry,
>> + NULL
>> +};
>> +
>> +enum xe_vsec {
>> + XE_VSEC_UNKNOWN = 0,
>> + XE_VSEC_DG2,
>> + XE_VSEC_BMG,
>> +};
>> +
>> +static struct intel_vsec_platform_info xe_vsec_info[] = {
>> + [XE_VSEC_DG2] = {
>> + .caps = VSEC_CAP_TELEMETRY,
>> + .headers = dg2_capabilities,
>> + .quirks = VSEC_QUIRK_EARLY_HW | VSEC_QUIRK_P2SB_OFFSET,
>> + },
>> + [XE_VSEC_BMG] = {
>> + .caps = VSEC_CAP_TELEMETRY,
>> + .headers = bmg_capabilities,
>> + },
>> + { }
>
>There is some cleanup here as well to support handling multiple platforms. Can
>these structures just be added to the initial DG2 patch?
I will reorder the patches so that BMG support lands first and DG2 support is
added after it. That way we can add the data structures as they are needed;
see the sketch below.
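
Roughly along these lines in the follow-on DG2 patch (just a sketch built from
the structures in this series; the final layout may differ):

enum xe_vsec {
	XE_VSEC_UNKNOWN = 0,
	XE_VSEC_BMG,
	XE_VSEC_DG2,		/* added when DG2 support is reintroduced */
};

static struct intel_vsec_platform_info xe_vsec_info[] = {
	[XE_VSEC_BMG] = {
		.caps = VSEC_CAP_TELEMETRY,
		.headers = bmg_capabilities,
	},
	[XE_VSEC_DG2] = {	/* new entry in the later DG2 patch */
		.caps = VSEC_CAP_TELEMETRY,
		.headers = dg2_capabilities,
		.quirks = VSEC_QUIRK_EARLY_HW | VSEC_QUIRK_P2SB_OFFSET,
	},
	{ }
};
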
Thanks!
M
>David
>
>> };
>>
>> +#define PUNIT_AGGREGATOR 0
>> +#define OOBMSM_AGG0 1
>> +
>> +/*
>> + * The telemetry memory space shares a common offset. To get the appropriate
>> + * data, set the index based on the GUID bits.
>> + *
>> + * The GUID will have the following bits to decode:
>> + * (2bits) - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
>> + * (2bits) - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
>> + * ... <other that are not currently relevant>
>> + *
>> + * Currently only the record-id is set. Once the other bits are set, the
>> + * decode path will get a little more complex.
>> + */
>> +static int xe_pmt_telem_read(void *args, u32 guid, u64 *data, u32 count)
>> +{
>> + struct xe_device *xe = pdev_to_xe_device((struct pci_dev *)args);
>> + void __iomem *telem_addr = xe->tiles[0].mmio.regs + BMG_TELEMETRY_OFFSET;
>> + u32 telem_region = guid & GUID_RECORD_MASK;
>> + int ret = 0;
>> +
>> + /* Update the base offset (if necessary) for the specific telemetry region */
>> + switch (telem_region) {
>> + case PUNIT_AGGREGATOR:
>> + telem_addr += 0x200;
>> + break;
>> + case OOBMSM_AGG0:
>> + break;
>> + default:
>> + return -EINVAL;
>> + }
>> +
>> + mutex_lock(&xe->pmt.lock);
>> + if (xe_pm_runtime_get_if_active(xe) > 0) {
>> + /* set SoC re-mapper index register based on guid memory region */
>> + xe_mmio_rmw32(xe->tiles[0].primary_gt, SG_REMAP_INDEX1, SG_REMAP_BITS,
>> + SG_REMAP_ACCESS(telem_region));
>> +
>> + memcpy_fromio(data, telem_addr, count);
>> +
>> + xe_pm_runtime_put(xe);
>> +
>> + ret = count;
>> + }
>> + mutex_unlock(&xe->pmt.lock);
>> +
>> + return ret;
>> +}
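
Side note on the GUID decode comment above: once the capability-type bits
come into play, I would expect the decode to grow roughly like this, reusing
the masks defined at the top of the file (a sketch only, not part of this
patch; FIELD_GET() comes from linux/bitfield.h):

	u32 telem_region = FIELD_GET(GUID_RECORD_MASK, guid);
	u32 cap_type = FIELD_GET(GUID_CAP_TYPE, guid);

	/* only the telemetry aggregator (capability type 1) is handled */
	if (cap_type != 1)
		return -EINVAL;
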
>> +
>> +struct pmt_callbacks xe_pmt_cb = {
>> + .read_telem = xe_pmt_telem_read,
>> +};
>> +
>> +static const int vsec_platforms[] = {
>> + [XE_DG2] = XE_VSEC_DG2,
>> + [XE_BATTLEMAGE] = XE_VSEC_BMG,
>> +};
>> +
>> +static enum xe_vsec get_platform_info(struct xe_device *xe)
>> +{
>> + if (xe->info.platform > XE_BATTLEMAGE)
>> + return XE_VSEC_UNKNOWN;
>> +
>> + return vsec_platforms[xe->info.platform];
>> +}
>> +
>> /*
>> * Access the DG2 PMT MMIO discovery table
>> *
>> @@ -92,15 +203,35 @@ static int dg2_adjust_offset(struct pci_dev *pdev, struct device *dev,
>> */
>> void xe_vsec_init(struct xe_device *xe)
>> {
>> - struct intel_vsec_platform_info *info = &dg2_vsec_info;
>> + struct intel_vsec_platform_info *info;
>> struct device *dev = xe->drm.dev;
>> struct pci_dev *pdev = to_pci_dev(dev);
>> + enum xe_vsec platform;
>> u32 ret;
>>
>> - ret = dg2_adjust_offset(pdev, dev, info);
>> - if (ret)
>> + platform = get_platform_info(xe);
>> + if (platform == XE_VSEC_UNKNOWN)
>> + return;
>> +
>> + info = &xe_vsec_info[platform];
>> + if (!info->headers)
>> return;
>>
>> + switch (platform) {
>> + case XE_VSEC_DG2:
>> + ret = dg2_adjust_offset(pdev, dev, info);
>> + if (ret)
>> + return;
>> + break;
>> +
>> + case XE_VSEC_BMG:
>> + info->priv_data = &xe_pmt_cb;
>> + break;
>> +
>> + default:
>> + break;
>> + }
>> +
>> /*
>> * Register a VSEC. Cleanup is handled using device managed
>> * resources.
>> diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
>> index 5a0dfc21eb0f..f59f8ac87b4e 100644
>> --- a/drivers/platform/x86/intel/vsec.c
>> +++ b/drivers/platform/x86/intel/vsec.c
>> @@ -341,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
>> void intel_vsec_register(struct pci_dev *pdev,
>> struct intel_vsec_platform_info *info)
>> {
>> - if (!pdev || !info)
>> + if (!pdev || !info || !info->headers)
>> return;
>>
>> intel_vsec_walk_header(pdev, info);