[Intel-xe] [PATCH 1/2] drm/xe: Make xe_mem_region struct
Matthew Brost
matthew.brost at intel.com
Fri Aug 11 18:21:39 UTC 2023
On Thu, Aug 10, 2023 at 12:08:14PM -0400, Oak Zeng wrote:
> Introduce an xe_mem_region structure which will be used in the
> coming patches. The new structure is used at both the xe device
> level (xe->mem.vram) and the xe_tile level (tile->mem.vram).
>
> Define xe_mem_region.dpa_base as the DPA (device physical address)
> base of this memory region and update the code according to this
> new definition.
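
For readers following the DPA rework: with this definition, a tile's
dpa_base is the device-level dpa_base plus that tile's offset within
VRAM, which is exactly how xe_mmio_probe_vram() fills it in below. A
minimal sketch of the intended relationship (illustrative only,
simplified types, not code from this patch):

    /* Hypothetical illustration of the dpa_base relationship. */
    #include <stdint.h>

    /*
     * Each tile's region starts at the device region's DPA base plus
     * the accumulated size of the preceding tiles (tile_offset).
     */
    static uint64_t tile_dpa_base_sketch(uint64_t device_dpa_base,
                                         uint64_t tile_offset)
    {
            return device_dpa_base + tile_offset;
    }

Since the device-level dpa_base is still 0 for now, this also keeps the
vram_region_gpu_offset() change below numerically equivalent to the old
xe->mem.vram.base + tile->mem.vram.base sum.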
>
> v1:
> - rename xe_mem_region.base to dpa_base per conversation with Mike
> Ruhl
>
> Signed-off-by: Oak Zeng <oak.zeng at intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_bo.c | 2 +-
> drivers/gpu/drm/xe/xe_device_types.h | 94 ++++++++++++----------------
> drivers/gpu/drm/xe/xe_migrate.c | 2 +-
> drivers/gpu/drm/xe/xe_mmio.c | 9 +--
> 4 files changed, 47 insertions(+), 60 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index 284c86107a5f..480f4e59f891 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -1457,7 +1457,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
> if (res->mem_type == XE_PL_STOLEN)
> return xe_ttm_stolen_gpu_offset(xe);
>
> - return xe->mem.vram.base + tile->mem.vram.base;
> + return tile->mem.vram.dpa_base;
> }
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index bfedcc7571b0..8e45724f3d2f 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -56,6 +56,44 @@ struct xe_ggtt;
> const struct xe_tile * : (const struct xe_device *)((tile__)->xe), \
> struct xe_tile * : (tile__)->xe)
>
> +/**
> + * struct xe_mem_region - memory region structure
> + * This is used to describe a memory region in an xe
> + * device, such as HBM memory or CXL extension memory.
> + */
> +struct xe_mem_region {
> + /** @io_start: IO start address of this VRAM instance */
> + resource_size_t io_start;
> + /**
> + * @io_size: IO size of this VRAM instance
> + *
> + * This represents how much of this VRAM we can access
> + * via the CPU through the VRAM BAR. This can be smaller
> + * than @usable_size, in which case only part of VRAM is CPU
> + * accessible (typically the first 256M). This
> + * configuration is known as small-bar.
> + */
> + resource_size_t io_size;
> + /** @dpa_base: This memory region's DPA (device physical address) base */
> + resource_size_t dpa_base;
> + /**
> + * @usable_size: usable size of VRAM
> + *
> + * Usable size of VRAM excluding reserved portions
> + * (e.g stolen mem)
> + */
> + resource_size_t usable_size;
> + /**
> + * @actual_physical_size: Actual VRAM size
> + *
> + * Actual VRAM size including reserved portions
> + * (e.g stolen mem)
> + */
> + resource_size_t actual_physical_size;
> + /** @mapping: pointer to VRAM mappable space */
> + void *__iomem mapping;
> +};
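
One way to read the new fields: @io_start/@io_size describe the
CPU-visible window through the VRAM BAR, @usable_size and
@actual_physical_size describe the device's view of the region, and
@dpa_base anchors the region in the device physical address space. A
hypothetical helper (not part of this patch) that follows directly from
the @io_size documentation above:

    /*
     * Hypothetical, for illustration only: true if @offset within the
     * region is reachable by the CPU through the VRAM BAR mapping.
     * Under small-bar, io_size < usable_size, so only the first
     * io_size bytes are CPU-visible.
     */
    static bool mem_region_cpu_accessible(const struct xe_mem_region *vram,
                                          resource_size_t offset)
    {
            return offset < vram->io_size;
    }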
> +
> /**
> * struct xe_tile - hardware tile structure
> *
> @@ -113,38 +151,7 @@ struct xe_tile {
> * Although VRAM is associated with a specific tile, it can
> * still be accessed by all tiles' GTs.
> */
> - struct {
> - /** @io_start: IO start address of this VRAM instance */
> - resource_size_t io_start;
> - /**
> - * @io_size: IO size of this VRAM instance
> - *
> - * This represents how much of this VRAM we can access
> - * via the CPU through the VRAM BAR. This can be smaller
> - * than @size, in which case only part of VRAM is CPU
> - * accessible (typically the first 256M). This
> - * configuration is known as small-bar.
> - */
> - resource_size_t io_size;
> - /** @base: offset of VRAM starting base */
> - resource_size_t base;
> - /**
> - * @usable_size: usable size of VRAM
> - *
> - * Usable size of VRAM excluding reserved portions
> - * (e.g stolen mem)
> - */
> - resource_size_t usable_size;
> - /**
> - * @actual_physical_size: Actual VRAM size
> - *
> - * Actual VRAM size including reserved portions
> - * (e.g stolen mem)
> - */
> - resource_size_t actual_physical_size;
> - /** @mapping: pointer to VRAM mappable space */
> - void *__iomem mapping;
> - } vram;
> + struct xe_mem_region vram;
>
> /** @vram_mgr: VRAM TTM manager */
> struct xe_ttm_vram_mgr *vram_mgr;
> @@ -263,28 +270,7 @@ struct xe_device {
> /** @mem: memory info for device */
> struct {
> /** @vram: VRAM info for device */
> - struct {
> - /** @io_start: IO start address of VRAM */
> - resource_size_t io_start;
> - /**
> - * @io_size: IO size of VRAM.
> - *
> - * This represents how much of VRAM the CPU can access
> - * via the VRAM BAR.
> - * On systems that do not support large BAR IO space,
> - * this can be smaller than the actual memory size, in
> - * which case only part of VRAM is CPU accessible
> - * (typically the first 256M). This configuration is
> - * known as small-bar.
> - */
> - resource_size_t io_size;
> - /** @size: Total size of VRAM */
> - resource_size_t size;
> - /** @base: Offset to apply for Device Physical Address control */
> - resource_size_t base;
> - /** @mapping: pointer to VRAM mappable space */
> - void *__iomem mapping;
> - } vram;
> + struct xe_mem_region vram;
> /** @sys_mgr: system TTM manager */
> struct ttm_resource_manager sys_mgr;
> } mem;
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index 18c94022930f..956a96b38346 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -264,7 +264,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
> * Use 1GB pages, it shouldn't matter the physical amount of
> * vram is less, when we don't access it.
> */
> - for (pos = 0; pos < xe->mem.vram.size; pos += SZ_1G, ofs += 8)
> + for (pos = 0; pos < xe->mem.vram.actual_physical_size; pos += SZ_1G, ofs += 8)
> xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
> }
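
Side note on the size choice: using actual_physical_size here means the
1G-page identity map covers the entire physical region, reserved
portions included, which matches the comment above about unused VRAM
not mattering. Roughly (illustrative arithmetic, not code from this
patch), the loop writes one 64-bit PTE per started 1G of VRAM:

    /* Illustrative only: PTE count written by the identity-map loop. */
    #define SZ_1G (1024ULL * 1024 * 1024)

    static unsigned long long identity_map_pte_count(unsigned long long vram_size)
    {
            /* One entry per 1G, rounding the last partial gigabyte up. */
            return (vram_size + SZ_1G - 1) / SZ_1G;
    }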
>
> diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
> index aa9c573b1243..41ee89247ddb 100644
> --- a/drivers/gpu/drm/xe/xe_mmio.c
> +++ b/drivers/gpu/drm/xe/xe_mmio.c
> @@ -173,7 +173,8 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
> if (!xe->mem.vram.io_size)
> return -EIO;
>
> - xe->mem.vram.base = 0; /* DPA offset */
> + /* XXX: Need to change when xe link code is ready */
> + xe->mem.vram.dpa_base = 0;
>
> /* set up a map to the total memory area. */
> xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
> @@ -281,7 +282,7 @@ int xe_mmio_probe_vram(struct xe_device *xe)
> return -ENODEV;
> }
>
> - tile->mem.vram.base = tile_offset;
> + tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
> tile->mem.vram.usable_size = vram_size;
> tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
>
> @@ -304,10 +305,10 @@ int xe_mmio_probe_vram(struct xe_device *xe)
> io_size -= min_t(u64, tile_size, io_size);
> }
>
> - xe->mem.vram.size = total_size;
> + xe->mem.vram.actual_physical_size = total_size;
>
> drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
> - &xe->mem.vram.size);
> + &xe->mem.vram.actual_physical_size);
> drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
> &available_size);
>
> --
> 2.26.3
>