[PATCH 4/9] drm/xe: avoid accessing internals of iosys_map
Lucas De Marchi
lucas.demarchi at intel.com
Thu May 22 13:56:54 UTC 2025
On Thu, May 22, 2025 at 04:52:13PM +1000, Dave Airlie wrote:
>From: Dave Airlie <airlied at redhat.com>
>
>This uses the new accessors to avoid touching iosys_map internals.
>
>Signed-off-by: Dave Airlie <airlied at redhat.com>
>---
> drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 2 +-
> drivers/gpu/drm/xe/xe_bo.c | 8 ++++----
> drivers/gpu/drm/xe/xe_eu_stall.c | 2 +-
> drivers/gpu/drm/xe/xe_guc_pc.c | 2 +-
> drivers/gpu/drm/xe/xe_map.h | 12 ++++++------
> drivers/gpu/drm/xe/xe_memirq.c | 16 ++++++++--------
> drivers/gpu/drm/xe/xe_oa.c | 4 ++--
> drivers/gpu/drm/xe/xe_pt.c | 4 ++--
> drivers/gpu/drm/xe/xe_sa.c | 8 ++++----
> 9 files changed, 29 insertions(+), 29 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
>index e8191562d122..ad2681c90efb 100644
>--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
>+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
>@@ -101,7 +101,7 @@ int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info
> }
> XE_WARN_ON(iosys_map_is_null(&obj->vmap));
>
>- info->screen_base = obj->vmap.vaddr_iomem;
>+ info->screen_base = iosys_map_ioptr(&obj->vmap);
> info->screen_size = obj->ttm.base.size;
>
> return 0;
>diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
>index d99d91fe8aa9..c83a54708495 100644
>--- a/drivers/gpu/drm/xe/xe_bo.c
>+++ b/drivers/gpu/drm/xe/xe_bo.c
>@@ -1249,7 +1249,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
> unmap = true;
> }
>
>- xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
>+ xe_map_memcpy_from(xe, iosys_map_ptr(&backup->vmap), &bo->vmap, 0,
> bo->size);
> }
>
>@@ -1342,7 +1342,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
> unmap = true;
> }
>
>- xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
>+ xe_map_memcpy_to(xe, &bo->vmap, 0, iosys_map_ptr(&backup->vmap),
> bo->size);
> }
>
>@@ -2226,9 +2226,9 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
> XE_BO_FLAG_PINNED_NORESTORE);
>
> xe_assert(xe, IS_DGFX(xe));
>- xe_assert(xe, !(*src)->vmap.is_iomem);
>+ xe_assert(xe, !iosys_map_is_iomem(&(*src)->vmap));
>
>- bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
>+ bo = xe_managed_bo_create_from_data(xe, tile, iosys_map_ptr(&(*src)->vmap),
> (*src)->size, dst_flags);
> if (IS_ERR(bo))
> return PTR_ERR(bo);
>diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
>index 96732613b4b7..d8f900efac95 100644
>--- a/drivers/gpu/drm/xe/xe_eu_stall.c
>+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
>@@ -741,7 +741,7 @@ static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
> for_each_dss_steering(xecore, gt, group, instance) {
> xecore_buf = &stream->xecore_buf[xecore];
> vaddr_offset = xecore * stream->per_xecore_buf_size;
>- xecore_buf->vaddr = stream->bo->vmap.vaddr + vaddr_offset;
>+ xecore_buf->vaddr = iosys_map_ptr(&stream->bo->vmap) + vaddr_offset;
> }
> return 0;
> }
>diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
>index 18c623992035..c7ad56774c99 100644
>--- a/drivers/gpu/drm/xe/xe_guc_pc.c
>+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
>@@ -1068,7 +1068,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
> goto out;
> }
>
>- memset(pc->bo->vmap.vaddr, 0, size);
>+ memset(iosys_map_ptr(&pc->bo->vmap), 0, size);
This is wrong; we need to convert it to use iosys_map_memset() instead.
> slpc_shared_data_write(pc, header.size, size);
>
> earlier = ktime_get();
>diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
>index f62e0c8b67ab..37842c02c7f9 100644
>--- a/drivers/gpu/drm/xe/xe_map.h
>+++ b/drivers/gpu/drm/xe/xe_map.h
>@@ -49,10 +49,10 @@ static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
> {
> xe_device_assert_mem_access(xe);
>
>- if (map->is_iomem)
>- return readl(map->vaddr_iomem);
>+ if (iosys_map_is_iomem(map))
>+ return readl(iosys_map_ioptr(map));
> else
>- return READ_ONCE(*(u32 *)map->vaddr);
>+ return READ_ONCE(*(u32 *)iosys_map_ptr(map));
We added this because of the mem_access assertion, but I have no idea why
exactly this is hand-rolling the read.
It seems we'd also benefit from having fixed-width type accessors (at least
u32) in iosys_map, so drivers aren't tempted to do this.
Lucas De Marchi
> }
>
> static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
>@@ -60,10 +60,10 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
> {
> xe_device_assert_mem_access(xe);
>
>- if (map->is_iomem)
>- writel(val, map->vaddr_iomem);
>+ if (iosys_map_is_iomem(map))
>+ writel(val, iosys_map_ioptr(map));
> else
>- *(u32 *)map->vaddr = val;
>+ *(u32 *)iosys_map_ptr(map) = val;
> }
>
> #define xe_map_rd(xe__, map__, offset__, type__) ({ \
>diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
>index 49c45ec3e83c..458955c75e04 100644
>--- a/drivers/gpu/drm/xe/xe_memirq.c
>+++ b/drivers/gpu/drm/xe/xe_memirq.c
>@@ -198,9 +198,9 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
> memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0));
> memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);
>
>- memirq_assert(memirq, !memirq->source.is_iomem);
>- memirq_assert(memirq, !memirq->status.is_iomem);
>- memirq_assert(memirq, !memirq->mask.is_iomem);
>+ memirq_assert(memirq, !iosys_map_is_iomem(&memirq->source));
>+ memirq_assert(memirq, !iosys_map_is_iomem(&memirq->status));
>+ memirq_assert(memirq, !iosys_map_is_iomem(&memirq->mask));
>
> memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n",
> xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
>@@ -418,7 +418,7 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
> static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
> struct xe_hw_engine *hwe)
> {
>- memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);
>+ memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, iosys_map_ptr(status));
>
> if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
> xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
>@@ -429,7 +429,7 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
> {
> const char *name = guc_name(guc);
>
>- memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);
>+ memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, iosys_map_ptr(status));
>
> if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
> xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
>@@ -479,9 +479,9 @@ void xe_memirq_handler(struct xe_memirq *memirq)
> if (!memirq->bo)
> return;
>
>- memirq_assert(memirq, !memirq->source.is_iomem);
>- memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
>- memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);
>+ memirq_assert(memirq, !iosys_map_is_iomem(&memirq->source));
>+ memirq_debug(memirq, "SOURCE %*ph\n", 32, iosys_map_ptr(&memirq->source));
>+ memirq_debug(memirq, "SOURCE %*ph\n", 32, iosys_map_ptr(&memirq->source) + 32);
>
> for_each_gt(gt, xe, gtid) {
> if (gt->tile != tile)
>diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
>index fb842fa0552e..99424d790d84 100644
>--- a/drivers/gpu/drm/xe/xe_oa.c
>+++ b/drivers/gpu/drm/xe/xe_oa.c
>@@ -880,8 +880,8 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
>
> stream->oa_buffer.bo = bo;
> /* mmap implementation requires OA buffer to be in system memory */
>- xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
>- stream->oa_buffer.vaddr = bo->vmap.vaddr;
>+ xe_assert(stream->oa->xe, iosys_map_is_iomem(&bo->vmap) == 0);
>+ stream->oa_buffer.vaddr = iosys_map_ptr(&bo->vmap);
> return 0;
> }
>
>diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
>index b42cf5d1b20c..af0992aea6b4 100644
>--- a/drivers/gpu/drm/xe/xe_pt.c
>+++ b/drivers/gpu/drm/xe/xe_pt.c
>@@ -1723,12 +1723,12 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
> u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
> int i;
>
>- if (map && map->is_iomem)
>+ if (map && iosys_map_is_iomem(map))
> for (i = 0; i < num_qwords; ++i)
> xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
> sizeof(u64), u64, empty);
> else if (map)
>- memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
>+ memset64(iosys_map_ptr(map) + qword_ofs * sizeof(u64), empty,
> num_qwords);
> else
> memset64(ptr, empty, num_qwords);
>diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
>index 1d43e183ca21..4ac335c68242 100644
>--- a/drivers/gpu/drm/xe/xe_sa.c
>+++ b/drivers/gpu/drm/xe/xe_sa.c
>@@ -68,15 +68,15 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
> return ERR_CAST(bo);
> }
> sa_manager->bo = bo;
>- sa_manager->is_iomem = bo->vmap.is_iomem;
>+ sa_manager->is_iomem = iosys_map_is_iomem(&bo->vmap);
> sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
>
>- if (bo->vmap.is_iomem) {
>+ if (iosys_map_is_iomem(&bo->vmap)) {
> sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
> if (!sa_manager->cpu_ptr)
> return ERR_PTR(-ENOMEM);
> } else {
>- sa_manager->cpu_ptr = bo->vmap.vaddr;
>+ sa_manager->cpu_ptr = iosys_map_ptr(&bo->vmap);
> memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
> }
>
>@@ -116,7 +116,7 @@ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
> struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
> struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
>
>- if (!sa_manager->bo->vmap.is_iomem)
>+ if (!iosys_map_is_iomem(&sa_manager->bo->vmap))
> return;
>
> xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
>--
>2.49.0
>
More information about the dri-devel
mailing list