[PATCH v11 08/10] drm/xe/nvm: add on-die non-volatile memory device
Usyskin, Alexander
alexander.usyskin at intel.com
Tue Jun 3 08:20:24 UTC 2025
> Subject: Re: [PATCH v11 08/10] drm/xe/nvm: add on-die non-volatile memory device
>
> Hey,
>
> I was looking into testing this with the xe code on PVC, and noticed some small
> changes that would be useful to integrate before merging.
>
> On 2025-05-28 15:51, Alexander Usyskin wrote:
> > Enable access to internal non-volatile memory on DGFX
> > with GSC/CSC devices via a child device.
> > The nvm child device is exposed via auxiliary bus.
> >
> > Reviewed-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > Acked-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > Signed-off-by: Alexander Usyskin <alexander.usyskin at intel.com>
> > ---
> > drivers/gpu/drm/xe/Makefile | 1 +
> > drivers/gpu/drm/xe/xe_device.c | 5 ++
> > drivers/gpu/drm/xe/xe_device_types.h | 6 ++
> > drivers/gpu/drm/xe/xe_nvm.c | 107 +++++++++++++++++++++++++++
> > drivers/gpu/drm/xe/xe_nvm.h | 15 ++++
> > drivers/gpu/drm/xe/xe_pci.c | 6 ++
> > 6 files changed, 140 insertions(+)
> > create mode 100644 drivers/gpu/drm/xe/xe_nvm.c
> > create mode 100644 drivers/gpu/drm/xe/xe_nvm.h
> >
> > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > index e4bf484d4121..4c51fe3520dc 100644
> > --- a/drivers/gpu/drm/xe/Makefile
> > +++ b/drivers/gpu/drm/xe/Makefile
> > @@ -80,6 +80,7 @@ xe-y += xe_bb.o \
> > xe_mmio.o \
> > xe_mocs.o \
> > xe_module.o \
> > + xe_nvm.o \
> > xe_oa.o \
> > xe_observation.o \
> > xe_pat.o \
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index d4b6e623aa48..845b38aea692 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -46,6 +46,7 @@
> > #include "xe_memirq.h"
> > #include "xe_mmio.h"
> > #include "xe_module.h"
> > +#include "xe_nvm.h"
> > #include "xe_oa.h"
> > #include "xe_observation.h"
> > #include "xe_pat.h"
> > @@ -884,6 +885,8 @@ int xe_device_probe(struct xe_device *xe)
> > return err;
> > }
> >
> > + xe_nvm_init(xe);
> > +
> > err = xe_heci_gsc_init(xe);
> > if (err)
> > return err;
> > @@ -941,6 +944,8 @@ void xe_device_remove(struct xe_device *xe)
> > {
> > xe_display_unregister(xe);
> >
> > + xe_nvm_fini(xe);
> > +
> > drm_dev_unplug(&xe->drm);
> >
> > xe_bo_pci_dev_remove_all(xe);
> > diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> > index 50b2bfa682ac..938cf1a440de 100644
> > --- a/drivers/gpu/drm/xe/xe_device_types.h
> > +++ b/drivers/gpu/drm/xe/xe_device_types.h
> > @@ -35,6 +35,7 @@
> > #include "intel_display_device.h"
> > #endif
> >
> > +struct intel_dg_nvm_dev;
> > struct xe_ggtt;
> > struct xe_pat_ops;
> > struct xe_pxp;
> > @@ -319,6 +320,8 @@ struct xe_device {
> > u8 has_fan_control:1;
> > /** @info.has_flat_ccs: Whether flat CCS metadata is used */
> > u8 has_flat_ccs:1;
> > + /** @info.has_gsc_nvm: Device has gsc non-volatile memory */
> > + u8 has_gsc_nvm:1;
> Is this flag really needed, or is IS_DGFX() enough? It's literally only used during
> NVM init, so any conditions could probably just be put there.
>
There are some DGFX devices that do not have a GSC/CSC and hence no corresponding NVM.
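To illustrate: if the condition were folded into init on top of IS_DGFX() alone, we
would still need a per-platform exclusion there, roughly (sketch only):

	/* hypothetical alternative inside xe_nvm_init() */
	if (!IS_DGFX(xe))
		return 0;
	/*
	 * ...and still exclude DGFX parts without a GSC/CSC NVM, e.g. by
	 * listing xe->info.platform values here; the has_gsc_nvm flag keeps
	 * that knowledge in the platform descriptors instead.
	 */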
> > /** @info.has_heci_cscfi: device has heci cscfi */
> > u8 has_heci_cscfi:1;
> > /** @info.has_heci_gscfi: device has heci gscfi */
> > @@ -544,6 +547,9 @@ struct xe_device {
> > /** @heci_gsc: graphics security controller */
> > struct xe_heci_gsc heci_gsc;
> >
> > + /** @nvm: discrete graphics non-volatile memory */
> > + struct intel_dg_nvm_dev *nvm;
> > +
> > /** @oa: oa observation subsystem */
> > struct xe_oa oa;
> >
> > diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
> > new file mode 100644
> > index 000000000000..33ba635ce116
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_nvm.c
> > @@ -0,0 +1,107 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
> > + */
> > +
> > +#include <linux/intel_dg_nvm_aux.h>
> > +#include <linux/pci.h>
> > +
> > +#include "xe_device_types.h"
> > +#include "xe_nvm.h"
> > +#include "xe_sriov.h"
> > +
> > +#define GEN12_GUNIT_NVM_BASE 0x00102040
> > +#define GEN12_GUNIT_NVM_SIZE 0x80
> > +#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)
> > +
> > +static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
> > + [0] = { .name = "DESCRIPTOR", },
> > + [2] = { .name = "GSC", },
> > + [9] = { .name = "PADDING", },
> > + [11] = { .name = "OptionROM", },
> > + [12] = { .name = "DAM", },
> > +};
> > +
> Small ask: can we enable PSC for PVC too? Or at least bump the region count by 1
> so it's doable.
Let's enable PVC in a follow-up patch, as this series has already been delayed too much.
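For reference, the follow-up would roughly be one more entry in this table, e.g.
(index and region count are assumptions, to be confirmed against the actual NVM map):

	[13] = { .name = "PSC", },

plus bumping INTEL_DG_NVM_REGIONS in include/linux/intel_dg_nvm_aux.h by one.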
>
> > +static void xe_nvm_release_dev(struct device *dev)
> > +{
> > +}
> > +
> > +int xe_nvm_init(struct xe_device *xe)
> > +{
> > + struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
> > + struct auxiliary_device *aux_dev;
> > + struct intel_dg_nvm_dev *nvm;
> > + int ret;
> > +
> > + if (!xe->info.has_gsc_nvm)
> > + return 0;
> > +
> > + /* No access to internal NVM from VFs */
> > + if (IS_SRIOV_VF(xe))
> > + return 0;
> > +
> > + /* Nvm pointer should be NULL here */
> > + if (WARN_ON(xe->nvm))
> > + return -EFAULT;
> > +
> > + xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
> > + if (!xe->nvm)
> > + return -ENOMEM;
> > +
> > + nvm = xe->nvm;
> > +
> > + nvm->writable_override = false;
> > + nvm->bar.parent = &pdev->resource[0];
> > + nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
> > + nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
> > + nvm->bar.flags = IORESOURCE_MEM;
> > + nvm->bar.desc = IORES_DESC_NONE;
> > + nvm->regions = regions;
> > +
> > + aux_dev = &nvm->aux_dev;
> > +
> > + aux_dev->name = "nvm";
> > + aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
> > + aux_dev->dev.parent = &pdev->dev;
> > + aux_dev->dev.release = xe_nvm_release_dev;
> > +
> > + ret = auxiliary_device_init(aux_dev);
> > + if (ret) {
> > + drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
> > + goto err;
> > + }
> > +
> > + ret = auxiliary_device_add(aux_dev);
> > + if (ret) {
> > + drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
> > + auxiliary_device_uninit(aux_dev);
> > + goto err;
> > + }
> > + return 0;
> > +
> > +err:
> > + kfree(nvm);
> > + xe->nvm = NULL;
> > + return ret;
> > +}
> > +
> > +void xe_nvm_fini(struct xe_device *xe)
> > +{
> > + struct intel_dg_nvm_dev *nvm = xe->nvm;
> > +
> > + if (!xe->info.has_gsc_nvm)
> > + return;
> > +
> > + /* No access to internal NVM from VFs */
> > + if (IS_SRIOV_VF(xe))
> > + return;
> > +
> > + /* Nvm pointer should not be NULL here */
> > + if (WARN_ON(!nvm))
> > + return;
> > +
> > + auxiliary_device_delete(&nvm->aux_dev);
> > + auxiliary_device_uninit(&nvm->aux_dev);
> > + kfree(nvm);
> > + xe->nvm = NULL;
> > +}
> In xe, instead of exporting nvm_fini, it would be good to use the drmm
> interface, like drmm_kzalloc for allocating NVM so it doesn't have to be freed
> on failure, and drmm_add_action_or_reset as the last action during init. That
> also removes all checks from fini().
Can be done, but I prefer to push it as a follow-up patch so as not to delay the whole series.
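Agreed it would be cleaner; a rough sketch of the drmm variant for that follow-up
(untested, helper name is illustrative, needs <drm/drm_managed.h>):

	static void xe_nvm_fini_action(struct drm_device *drm, void *arg)
	{
		struct intel_dg_nvm_dev *nvm = arg;

		auxiliary_device_delete(&nvm->aux_dev);
		auxiliary_device_uninit(&nvm->aux_dev);
	}

	/* in xe_nvm_init(), after the early-return checks */
	nvm = drmm_kzalloc(&xe->drm, sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;
	xe->nvm = nvm;
	...
	return drmm_add_action_or_reset(&xe->drm, xe_nvm_fini_action, nvm);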
>
> > diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h
> > new file mode 100644
> > index 000000000000..7f3d5f57bed0
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_nvm.h
> > @@ -0,0 +1,15 @@
> > +/* SPDX-License-Identifier: MIT */
> > +/*
> > + * Copyright(c) 2019-2025 Intel Corporation. All rights reserved.
> > + */
> > +
> > +#ifndef __XE_NVM_H__
> > +#define __XE_NVM_H__
> > +
> > +struct xe_device;
> > +
> > +int xe_nvm_init(struct xe_device *xe);
> > +
> > +void xe_nvm_fini(struct xe_device *xe);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
> > index b68c90910d82..6aa9850bb342 100644
> > --- a/drivers/gpu/drm/xe/xe_pci.c
> > +++ b/drivers/gpu/drm/xe/xe_pci.c
> > @@ -63,6 +63,7 @@ struct xe_device_desc {
> >
> > u8 has_display:1;
> > u8 has_fan_control:1;
> > + u8 has_gsc_nvm:1;
> > u8 has_heci_gscfi:1;
> > u8 has_heci_cscfi:1;
> > u8 has_llc:1;
> > @@ -271,6 +272,7 @@ static const struct xe_device_desc dg1_desc = {
> > PLATFORM(DG1),
> > .dma_mask_size = 39,
> > .has_display = true,
> > + .has_gsc_nvm = 1,
> > .has_heci_gscfi = 1,
> > .require_force_probe = true,
> > };
> > @@ -282,6 +284,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
> > #define DG2_FEATURES \
> > DGFX_FEATURES, \
> > PLATFORM(DG2), \
> > + .has_gsc_nvm = 1, \
> > .has_heci_gscfi = 1, \
> > .subplatforms = (const struct xe_subplatform_desc[]) { \
> > { XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
> > @@ -318,6 +321,7 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
> > PLATFORM(PVC),
> > .dma_mask_size = 52,
> > .has_display = false,
> > + .has_gsc_nvm = 1,
> > .has_heci_gscfi = 1,
> > .max_remote_tiles = 1,
> > .require_force_probe = true,
> > @@ -346,6 +350,7 @@ static const struct xe_device_desc bmg_desc = {
> > .dma_mask_size = 46,
> > .has_display = true,
> > .has_fan_control = true,
> > + .has_gsc_nvm = 1,
> > .has_heci_cscfi = 1,
> > .needs_scratch = true,
> > };
> > @@ -589,6 +594,7 @@ static int xe_info_init_early(struct xe_device *xe,
> > xe->info.dma_mask_size = desc->dma_mask_size;
> > xe->info.is_dgfx = desc->is_dgfx;
> > xe->info.has_fan_control = desc->has_fan_control;
> > + xe->info.has_gsc_nvm = desc->has_gsc_nvm;
> > xe->info.has_heci_gscfi = desc->has_heci_gscfi;
> > xe->info.has_heci_cscfi = desc->has_heci_cscfi;
> > xe->info.has_llc = desc->has_llc;
>
> Kind regards,
> Maarten Lankhorst
--
Thanks,
Sasha