[PATCH v5 21/32] drm/xe: Add SVM device memory mirroring
Matthew Brost
matthew.brost at intel.com
Thu Feb 13 02:11:01 UTC 2025
Add SVM device memory mirroring, which enables device pages for
migration. This is enabled via the CONFIG_DRM_XE_DEVMEM_MIRROR Kconfig
option, which defaults to enabled. If disabled, SVM still works but
without migration, and the KMD memory footprint is smaller.
v3:
- Add CONFIG_XE_DEVMEM_MIRROR
v4:
- Fix Kconfig (Himal)
- Use %pe to print errors (Thomas)
- Fix alignment issue (Checkpatch)
v5:
- s/xe_mem_region/xe_vram_region (Rebase)
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/xe/Kconfig | 9 ++++
drivers/gpu/drm/xe/xe_device_types.h | 8 ++++
drivers/gpu/drm/xe/xe_svm.c | 62 +++++++++++++++++++++++++++-
drivers/gpu/drm/xe/xe_svm.h | 3 ++
drivers/gpu/drm/xe/xe_tile.c | 5 +++
5 files changed, 85 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 60b922f75001..4bc03d6f6720 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -74,6 +74,15 @@ config DRM_XE_DP_TUNNEL
If in doubt say "Y".
+config DRM_XE_DEVMEM_MIRROR
+ bool "Enable device memory mirror"
+ depends on DRM_XE
+ select GET_FREE_REGION
+ default y
+ help
+ Disable this option only if you want to compile out device memory
+ mirror support. Disabling it reduces the KMD memory footprint.
+
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
depends on DRM_XE
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 6a41f608a7a1..00873d80b10d 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -106,6 +106,14 @@ struct xe_vram_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remapping device memory as ZONE_DEVICE
+ */
+ resource_size_t hpa_base;
/** @ttm: VRAM TTM manager */
struct xe_ttm_vram_mgr ttm;
};
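Since devm_memremap_pages() lays the struct pages out linearly over the
requested resource, @hpa_base is all that is needed to translate a
ZONE_DEVICE page back into an offset within the VRAM region. A minimal
sketch of that translation, with an illustrative helper name that is not
part of this patch:

static resource_size_t vram_page_to_offset(struct xe_vram_region *vr,
					   struct page *page)
{
	/*
	 * Pages backing the remapped range are linear over the region,
	 * so the distance from hpa_base is the page's offset into VRAM.
	 */
	return page_to_phys(page) - vr->hpa_base;
}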
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 7b1076c184e3..b1947c75c249 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -29,6 +29,11 @@ static unsigned long xe_svm_range_end(struct xe_svm_range *range)
return drm_gpusvm_range_end(&range->base);
}
+static void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
@@ -317,8 +322,8 @@ int xe_svm_init(struct xe_vm *vm)
xe_svm_garbage_collector_work_func);
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, NULL, 0, vm->size,
- SZ_512M, &gpusvm_ops, fault_chunk_sizes,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size, SZ_512M, &gpusvm_ops, fault_chunk_sizes,
ARRAY_SIZE(fault_chunk_sizes));
if (err)
return err;
@@ -453,3 +458,56 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+/**
+ * xe_devm_add() - Remap and provide memmap backing for device memory
+ * @tile: tile that the memory region belongs to
+ * @vr: vram memory region to remap
+ *
+ * This remaps device memory to the host physical address space and creates
+ * struct page backing for the device memory.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
+ struct resource *res;
+ void *addr;
+ int ret;
+
+ res = devm_request_free_mem_region(dev, &iomem_resource,
+ vr->usable_size);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ return ret;
+ }
+
+ vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ vr->pagemap.range.start = res->start;
+ vr->pagemap.range.end = res->end;
+ vr->pagemap.nr_range = 1;
+ vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.owner = xe_svm_devm_owner(xe);
+ addr = devm_memremap_pages(dev, &vr->pagemap);
+ if (IS_ERR(addr)) {
+ devm_release_mem_region(dev, res->start, resource_size(res));
+ ret = PTR_ERR(addr);
+ drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
+ tile->id, ERR_PTR(ret));
+ return ret;
+ }
+ vr->hpa_base = res->start;
+
+ drm_info(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
+ tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
+ return 0;
+}
+#else
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+#endif
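The pointer returned by xe_svm_devm_owner() is both passed to
drm_gpusvm_init() and stored as pagemap.owner, so the core MM / HMM side
can tell this device's private pages apart from those of other drivers
during a page walk. A minimal sketch of the kind of ownership test this
enables, using a hypothetical helper (the real checks live in drm_gpusvm
and the HMM core, not in this file):

static bool xe_page_is_device_private(struct xe_device *xe,
				      struct page *page)
{
	/*
	 * A page belongs to this device's mirror only if it is a
	 * device-private ZONE_DEVICE page and its pagemap owner matches
	 * the owner registered in xe_devm_add().
	 */
	return is_device_private_page(page) &&
	       page->pgmap->owner == xe_svm_devm_owner(xe);
}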
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 50d47024d2db..0fa525d34987 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -11,6 +11,7 @@
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
+struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;
@@ -36,6 +37,8 @@ struct xe_svm_range {
u8 tile_invalidated;
};
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);
+
int xe_svm_init(struct xe_vm *vm);
void xe_svm_fini(struct xe_vm *vm);
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index d9a7a04ff652..51cda137cfbc 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -13,6 +13,7 @@
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
+#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
@@ -160,6 +161,7 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
*/
int xe_tile_init_noalloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
int err;
err = tile_ttm_mgr_init(tile);
@@ -168,6 +170,9 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
xe_wa_apply_tile_workarounds(tile);
+ if (xe->info.has_usm && IS_DGFX(xe))
+ xe_devm_add(tile, &tile->mem.vram);
+
err = xe_tile_sysfs_init(tile);
return 0;
--
2.34.1