[PATCH 1/8] drm/xe/svm: Remap and provide memmap backing for GPU vram
Oak Zeng
oak.zeng at intel.com
Tue Mar 19 02:55:04 UTC 2024
Remap GPU vram using devm_memremap_pages, so that each GPU vram
page is backed by a struct page.
Those struct pages are created to allow hmm to migrate buffers
between GPU vram and CPU system memory using the existing Linux
migration mechanism (i.e., the same mechanism that migrates between
CPU system memory and hard disk).
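As a side note (not part of this patch), having a struct page per vram
page also means a page handed back by the migration core can be
translated to a device physical address via the hpa_base introduced
below. A minimal sketch, assuming the region's existing dpa_base field
and a hypothetical helper name:

static u64 xe_mem_region_page_to_dpa(struct xe_mem_region *mr,
                                     struct page *page)
{
        /* Host physical address backing this remapped vram page */
        u64 hpa = (u64)page_to_pfn(page) << PAGE_SHIFT;

        /* Offset into the remapped range, rebased to device physical space */
        return mr->dpa_base + (hpa - mr->hpa_base);
}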
This is preparation work to enable svm (shared virtual memory)
through the Linux kernel hmm framework. The memory remap's page map
type is set to MEMORY_DEVICE_PRIVATE for now. This means that even
though each GPU vram page gets a struct page and can be mapped in
the CPU page table, such pages are treated as the GPU's private
resource, so the CPU can't access them. If the CPU accesses such a
page, a page fault is triggered and the page is migrated to system
memory.
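When the CPU faults on such a page, the fault reaches the pagemap's
migrate_to_ram callback (left as a stub in this patch). For
illustration only, a rough sketch of what that handler typically looks
like using the core migrate_vma helpers (simplified, not the actual
implementation):

static vm_fault_t xe_devm_migrate_to_ram(struct vm_fault *vmf)
{
        unsigned long src = 0, dst = 0;
        struct migrate_vma migrate = {
                .vma            = vmf->vma,
                .start          = vmf->address,
                .end            = vmf->address + PAGE_SIZE,
                .src            = &src,
                .dst            = &dst,
                .pgmap_owner    = vmf->page->pgmap->owner,
                .fault_page     = vmf->page,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };

        if (migrate_vma_setup(&migrate))
                return VM_FAULT_SIGBUS;

        /*
         * Allocate a system memory page, copy vram -> system memory
         * (e.g. with the blitter), then publish the new page through
         * dst using migrate_pfn() before finishing the migration.
         */
        migrate_vma_pages(&migrate);
        migrate_vma_finalize(&migrate);

        return 0;
}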
For GPU devices that support a coherent memory protocol between CPU
and GPU (such as CXL or CAPI), we can remap device memory as
MEMORY_DEVICE_COHERENT. This is TBD.
v1:
Changes per code review feedback from Matt:
change .o order in Makefile
fix indentation
change code order in mmio_fini
remove unnecessary header file
unify xe_svm_devm_add/_remove parameters
use tile (vs dev) as pagemap.owner during memremap
only remap vram for platforms that support usm
Changes per review feedback from Brian:
s/xe_svm_devm_add/xe_devm_add
s/xe_svm_devm_remove/xe_devm_remove
move calling of xe_devm_add to xe_tile.c
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Thomas Hellström <thomas.hellstrom at intel.com>
Cc: Brian Welty <brian.welty at intel.com>
---
drivers/gpu/drm/xe/Makefile | 1 +
drivers/gpu/drm/xe/xe_device_types.h | 8 +++
drivers/gpu/drm/xe/xe_mmio.c | 6 ++
drivers/gpu/drm/xe/xe_svm.h | 15 +++++
drivers/gpu/drm/xe/xe_svm_devmem.c | 89 ++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_tile.c | 4 ++
6 files changed, 123 insertions(+)
create mode 100644 drivers/gpu/drm/xe/xe_svm.h
create mode 100644 drivers/gpu/drm/xe/xe_svm_devmem.c
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 3c3e67885559..e2ec6d1375c0 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -128,6 +128,7 @@ xe-y += xe_bb.o \
xe_sa.o \
xe_sched_job.o \
xe_step.o \
+ xe_svm_devmem.o \
xe_sync.o \
xe_tile.o \
xe_tile_sysfs.o \
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9785eef2e5a4..607b61326c9a 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -99,6 +99,14 @@ struct xe_mem_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remapping device memory as ZONE_DEVICE.
+ */
+ resource_size_t hpa_base;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 7ba2477452d7..525254d9369e 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -22,6 +22,7 @@
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_tile.h"
+#include "xe_svm.h"
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
#define TILE_COUNT REG_GENMASK(15, 8)
@@ -354,6 +355,11 @@ void xe_mmio_probe_tiles(struct xe_device *xe)
static void mmio_fini(struct drm_device *drm, void *arg)
{
struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe, id)
+ xe_devm_remove(tile, &tile->mem.vram);
pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
if (xe->mem.vram.mapping)
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
new file mode 100644
index 000000000000..e944971cfc6d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __XE_SVM_H
+#define __XE_SVM_H
+
+struct xe_tile;
+struct xe_mem_region;
+
+int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr);
+void xe_devm_remove(struct xe_tile *tile, struct xe_mem_region *mr);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_svm_devmem.c b/drivers/gpu/drm/xe/xe_svm_devmem.c
new file mode 100644
index 000000000000..f5fa07150874
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm_devmem.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/mm_types.h>
+#include <linux/sched/mm.h>
+
+#include "xe_device_types.h"
+#include "xe_svm.h"
+
+
+static vm_fault_t xe_devm_migrate_to_ram(struct vm_fault *vmf)
+{
+ return 0;
+}
+
+static void xe_devm_page_free(struct page *page)
+{
+}
+
+static const struct dev_pagemap_ops xe_devm_pagemap_ops = {
+ .page_free = xe_devm_page_free,
+ .migrate_to_ram = xe_devm_migrate_to_ram,
+};
+
+/**
+ * xe_devm_add: Remap and provide memmap backing for device memory
+ * @tile: tile that the memory region belongs to
+ * @mr: memory region to remap
+ *
+ * This remaps device memory to the host physical address space and creates
+ * struct pages to back the device memory.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
+ struct resource *res;
+ void *addr;
+ int ret;
+
+ res = devm_request_free_mem_region(dev, &iomem_resource,
+ mr->usable_size);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ return ret;
+ }
+
+ mr->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ mr->pagemap.range.start = res->start;
+ mr->pagemap.range.end = res->end;
+ mr->pagemap.nr_range = 1;
+ mr->pagemap.ops = &xe_devm_pagemap_ops;
+ mr->pagemap.owner = xe;
+ addr = devm_memremap_pages(dev, &mr->pagemap);
+ if (IS_ERR(addr)) {
+ devm_release_mem_region(dev, res->start, resource_size(res));
+ ret = PTR_ERR(addr);
+ drm_err(&xe->drm, "Failed to remap tile %d memory, errno %d\n",
+ tile->id, ret);
+ return ret;
+ }
+ mr->hpa_base = res->start;
+
+ drm_info(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
+ tile->id, mr->io_start, mr->io_start + mr->usable_size, res);
+ return 0;
+}
+
+/**
+ * xe_devm_remove: Unmap device memory and free resources
+ * @tile: xe tile
+ * @mr: memory region to remove
+ */
+void xe_devm_remove(struct xe_tile *tile, struct xe_mem_region *mr)
+{
+ struct device *dev = &to_pci_dev(tile->xe->drm.dev)->dev;
+
+ /* FIXME: Does the code below cause a kernel hang during module removal? */
+ if (mr->hpa_base) {
+ devm_memunmap_pages(dev, &mr->pagemap);
+ devm_release_mem_region(dev, mr->pagemap.range.start,
+ mr->pagemap.range.end - mr->pagemap.range.start + 1);
+ }
+}
+
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0650b2fa75ef..f1c4f9de51df 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -14,6 +14,7 @@
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_svm.h"
/**
* DOC: Multi-tile Design
@@ -158,6 +159,7 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
*/
int xe_tile_init_noalloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
int err;
xe_device_mem_access_get(tile_to_xe(tile));
@@ -175,6 +177,8 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
xe_tile_sysfs_init(tile);
+ if (xe->info.has_usm)
+ xe_devm_add(tile, &tile->mem.vram);
err_mem_access:
xe_device_mem_access_put(tile_to_xe(tile));
return err;
--
2.26.3