[PATCH v2] drm/xe: Support for mmap-ing mmio regions
Levi, Ilia
ilia.levi at intel.com
Tue Jun 10 09:38:40 UTC 2025
On 09/06/2025 17:20, Upadhyay, Tejas wrote:
>
>> -----Original Message-----
>> From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of Ilia Levi
>> Sent: 09 June 2025 15:30
>> To: intel-xe at lists.freedesktop.org
>> Cc: Levi, Ilia <ilia.levi at intel.com>; Elbaz, Koby <koby.elbaz at intel.com>;
>> Sinyuk, Konstantin <konstantin.sinyuk at intel.com>; Avizrat, Yaron
>> <yaron.avizrat at intel.com>; Haimovski, Moti <moti.haimovski at intel.com>;
>> Freiman, Didi <didi.freiman at intel.com>
>> Subject: [PATCH v2] drm/xe: Support for mmap-ing mmio regions
>>
>> Allow the driver to expose hardware register spaces to userspace through
>> GEM objects with fake mmap offsets. This can be useful for userspace-
>> firmware communication, debugging, etc.
>>
>> v2: Minor doc fix (CI)
>>
>> Signed-off-by: Ilia Levi <ilia.levi at intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_device_types.h |  14 +++
>> drivers/gpu/drm/xe/xe_mmio.c         | 142 +++++++++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_mmio.h         |   4 +
>> 3 files changed, 160 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
>> index ac27389ccb8b..78542de0d48d 100644
>> --- a/drivers/gpu/drm/xe/xe_device_types.h
>> +++ b/drivers/gpu/drm/xe/xe_device_types.h
>> @@ -10,6 +10,7 @@
>>
>> #include <drm/drm_device.h>
>> #include <drm/drm_file.h>
>> +#include <drm/drm_gem.h>
>> #include <drm/drm_pagemap.h>
>> #include <drm/ttm/ttm_device.h>
>>
>> @@ -161,6 +162,19 @@ struct xe_mmio {
>> u32 adj_offset;
>> };
>>
>> +/**
>> + * struct xe_mmio_gem - GEM wrapper for xe_mmio
>> + *
>> + * A GEM object for exposing xe_mmio instance to userspace via mmap.
>> + */
>> +struct xe_mmio_gem {
>> +	/** @base: GEM object base */
>> +	struct drm_gem_object base;
>> +
>> +	/** @mmio: The MMIO region to expose */
>> +	struct xe_mmio mmio;
>> +};
>> +
>> /**
>> * struct xe_tile - hardware tile structure
>> *
>> diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
>> index 7357458bc0d2..6bfa915a9602 100644
>> --- a/drivers/gpu/drm/xe/xe_mmio.c
>> +++ b/drivers/gpu/drm/xe/xe_mmio.c
>> @@ -408,3 +408,145 @@ int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 va
>> {
>> 	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
>> }
>> +
>> +/**
>> + * DOC: Exposing MMIO regions to userspace
>> + *
>> + * In certain cases, the driver may allow userspace to mmap a portion of the hardware registers.
>> + *
>> + * This can be done as follows:
>> + * 1. Define an xe_mmio instance that represents this portion.
>> + * 2. Call xe_mmio_gem_create() to create a GEM object with an mmap-able fake offset.
>> + * 3. Use drm_vma_node_offset_addr() on the created GEM object to retrieve the fake offset.
>> + * 4. Provide the fake offset to userspace.
>> + * 5. Userspace can call mmap with the fake offset. The length provided to mmap
>> + *    must match the size of the xe_mmio instance.
>> + * 6. When the region is no longer needed, call xe_mmio_gem_destroy() to release the GEM object.
>> + *
>> + * Limitations: The exposed xe_mmio must be page-aligned with regard to its BAR offset and size.
>> + *
>> + * WARNING: Exposing MMIO regions to userspace can have security and stability implications.
>> + * Make sure not to expose any sensitive registers.
>> + */
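(As an illustration of step 5, the userspace side might look like the sketch below. This is hypothetical code, not part of the patch; fake_offset and size are assumed to have reached userspace through some driver-specific channel, and MAP_SHARED is used since a private COW mapping makes no sense for MMIO.)

#include <stdint.h>
#include <sys/mman.h>

/* Map an exposed MMIO region; fake_offset and size come from the driver. */
static volatile uint32_t *map_mmio_region(int drm_fd, uint64_t fake_offset,
					  size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 drm_fd, (off_t)fake_offset);

	return ptr == MAP_FAILED ? NULL : (volatile uint32_t *)ptr;
}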
>> +
>> +static void xe_mmio_gem_free(struct drm_gem_object *);
>> +static int xe_mmio_gem_mmap(struct drm_gem_object *, struct vm_area_struct *);
>> +
>> +static const struct vm_operations_struct vm_ops = {
>> +	.open = drm_gem_vm_open,
>> +	.close = drm_gem_vm_close,
> Should you remap in the fault handler, to make sure the mapping is always there?
Hmm, do you see any benefit to lazy mapping here?
When a user maps HW registers (presumably a page or so, not much more), I think it's fine
to do the mapping immediately on mmap. But let me know if I'm missing something here.
- Ilia
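For reference, the fault-handler variant being discussed could look roughly like the sketch below. This is hypothetical code, not part of this patch; it reuses to_xe_mmio_gem() and xe_mmio_phys_addr() from the patch, would be wired up as .fault in vm_ops, and would replace the remap_pfn_range() call in the mmap callback:

static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	/* drm_gem_mmap() stores the GEM object in vm_private_data */
	struct xe_mmio_gem *obj = to_xe_mmio_gem(vma->vm_private_data);
	unsigned long pfn;

	/* Map only the page that faulted instead of the whole region upfront */
	pfn = (xe_mmio_phys_addr(&obj->mmio) >> PAGE_SHIFT) +
	      ((vmf->address - vma->vm_start) >> PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}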
>
>> +};
>> +
>> +static const struct drm_gem_object_funcs xe_mmio_gem_funcs = {
>> +	.free = xe_mmio_gem_free,
>> +	.mmap = xe_mmio_gem_mmap,
>> +	.vm_ops = &vm_ops,
>> +};
>> +
>> +static inline struct xe_mmio_gem *to_xe_mmio_gem(struct drm_gem_object *obj)
>> +{
>> +	return container_of(obj, struct xe_mmio_gem, base);
>> +}
>> +
>> +static inline phys_addr_t xe_mmio_phys_addr(struct xe_mmio *mmio)
>> +{
>> +	struct xe_device *xe = tile_to_xe(mmio->tile);
>> +
>> +	/*
>> +	 * All MMIO instances are currently on PCI BAR 0, so we can do the
>> +	 * trick below. In the future we may want to store the physical
>> +	 * address in struct xe_mmio.
>> +	 */
>> +	return pci_resource_start(to_pci_dev(xe->drm.dev), GTTMMADR_BAR) +
>> +	       (uintptr_t)(mmio->regs - xe->mmio.regs);
>> +}
>> +
>> +/**
>> + * xe_mmio_gem_create - Expose an MMIO region to userspace
>> + * @mmio: xe_mmio instance
>> + * @file: DRM file descriptor
>> + *
>> + * This function creates a GEM object with an mmap-able fake offset that wraps
>> + * the provided xe_mmio instance.
>> + *
>> + * See: "Exposing MMIO regions to userspace"
>> + */
>> +struct xe_mmio_gem *
>> +xe_mmio_gem_create(struct xe_mmio *mmio, struct drm_file *file)
>> +{
>> +	struct xe_device *xe = tile_to_xe(mmio->tile);
>> +	size_t size = mmio->regs_size;
>> +	struct xe_mmio_gem *obj;
>> +	struct drm_gem_object *base;
>> +	int err;
>> +
>> +	if ((xe_mmio_phys_addr(mmio) % PAGE_SIZE != 0) || (size % PAGE_SIZE != 0))
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
>> +	if (!obj)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	base = &obj->base;
>> +	base->funcs = &xe_mmio_gem_funcs;
>> +	obj->mmio = *mmio;
>> +
>> +	drm_gem_private_object_init(&xe->drm, base, size);
>> +
>> +	err = drm_gem_create_mmap_offset(base);
>> +	if (err)
>> +		goto free_gem;
>> +
>> +	err = drm_vma_node_allow(&base->vma_node, file);
>> +	if (err)
>> +		goto free_gem;
>> +
>> +	return obj;
>> +
>> +free_gem:
>> +	xe_mmio_gem_free(base);
>> +	return ERR_PTR(err);
>> +}
>> +
>> +static void xe_mmio_gem_free(struct drm_gem_object *base)
>> +{
>> +	struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
>> +
>> +	drm_gem_object_release(base);
>> +	kfree(obj);
>> +}
>> +
>> +/**
>> + * xe_mmio_gem_destroy - Destroy the GEM object wrapping xe_mmio
>> + * @gem: the GEM object to destroy
>> + *
>> + * This function releases resources associated with the GEM object created by
>> + * xe_mmio_gem_create().
>> + *
>> + * See: "Exposing MMIO regions to userspace"
>> + */
>> +void xe_mmio_gem_destroy(struct xe_mmio_gem *gem)
>> +{
>> +	xe_mmio_gem_free(&gem->base);
>> +}
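Putting the two entry points together, a hypothetical caller covering steps 1-4 and 6 of the DOC above might look like this sketch. The caller and the particular xe_mmio instance are assumptions for illustration, not part of the patch:

static int expose_mmio_region(struct xe_mmio *mmio, struct drm_file *file,
			      u64 *offset_out)
{
	struct xe_mmio_gem *gem;

	/* steps 1-2: wrap the region in a GEM object with a fake offset */
	gem = xe_mmio_gem_create(mmio, file);
	if (IS_ERR(gem))
		return PTR_ERR(gem);

	/* step 3: the fake offset userspace will pass to mmap() */
	*offset_out = drm_vma_node_offset_addr(&gem->base.vma_node);

	/*
	 * step 4: hand *offset_out to userspace, e.g. via a query ioctl;
	 * keep gem around and call xe_mmio_gem_destroy() for step 6.
	 */
	return 0;
}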
>> +
>> +static int xe_mmio_gem_mmap(struct drm_gem_object *base, struct vm_area_struct *vma)
>> +{
>> +	struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
>> +	struct xe_mmio *mmio = &obj->mmio;
>> +
>> +	if (vma->vm_end - vma->vm_start != base->size)
>> +		return -EINVAL;
>> +
>> +	/*
>> +	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
>> +	 * whole buffer from the start.
>> +	 */
>> +	vma->vm_pgoff = 0;
> I am not sure, but is it confirmed this offset won't be used by anything else?
Yes, I believe it is used to look up the GEM object in drm_gem_mmap and that's it.
Also many (maybe all?) drivers do exactly this, for example see:
mtk_gem_object_mmap
exynos_drm_gem_mmap_buffer
rockchip_drm_gem_object_mmap
__tegra_gem_mmap
- Ilia
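For context, the lookup that drm_gem_mmap() does with vm_pgoff before calling into the driver boils down to something like the sketch below (paraphrased from drm_gem.c, with locking and error handling omitted), which is why the driver's mmap callback is free to repurpose vm_pgoff afterwards:

/* Roughly what DRM does with the fake offset before the driver sees the vma */
static struct drm_gem_object *
lookup_gem_by_fake_offset(struct drm_device *dev, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;

	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	return node ? container_of(node, struct drm_gem_object, vma_node) : NULL;
}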
>> +	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
>> +
>> +	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
>> +			  VM_DONTCOPY | VM_NORESERVE);
> Do you want to allow cow_mapping?
Good question... I probably should enforce that the user passed MAP_SHARED, thanks!
- Ilia
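A check along these lines at the top of xe_mmio_gem_mmap() would do it (sketch only; is_cow_mapping() is the helper in include/linux/mm.h that tests for a private writable mapping):

	/* Reject COW mappings: MMIO regions must be mapped MAP_SHARED */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;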
>
> Tejas
>> +
>> +	return remap_pfn_range(vma, vma->vm_start, xe_mmio_phys_addr(mmio) >> PAGE_SHIFT,
>> +			       base->size, vma->vm_page_prot);
>> +}
>> diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
>> index c151ba569003..2990bbcef24d 100644
>> --- a/drivers/gpu/drm/xe/xe_mmio.h
>> +++ b/drivers/gpu/drm/xe/xe_mmio.h
>> @@ -8,6 +8,7 @@
>>
>> #include "xe_gt_types.h"
>>
>> +struct drm_file;
>> struct xe_device;
>> struct xe_reg;
>>
>> @@ -42,4 +43,7 @@ static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
>> 	return &xe->tiles[0].mmio;
>> }
>>
>> +struct xe_mmio_gem *xe_mmio_gem_create(struct xe_mmio *mmio, struct drm_file *file);
>> +void xe_mmio_gem_destroy(struct xe_mmio_gem *gem);
>> +
>> #endif
>> --
>> 2.43.0