[CI 36/42] drm/xe/svm: introduce svm migration function
Oak Zeng
oak.zeng at intel.com
Thu Jun 13 04:24:23 UTC 2024
Introduce xe_svm_migrate to migrate between two migration
vectors. This function will be registered with the DRM layer
as the callback for SVM memory migration.
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Brian Welty <brian.welty at intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 48 ++++++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 3 +++
2 files changed, 51 insertions(+)
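
For context, a minimal sketch of how this callback is expected to be
wired up on the drm side follows; the drm_mem_region_ops structure and
its member names below are assumptions, as the actual interface lives
in drm_svm.h and is introduced by other patches in this series.

/*
 * Sketch only: the ops structure and member names are assumptions;
 * see drm_svm.h for the real interface.
 */
static const struct drm_mem_region_ops xe_svm_mr_ops = {
	.alloc_pages = xe_svm_alloc_pages,
	.free_page   = xe_svm_free_page,
	.migrate     = xe_svm_migrate,	/* added by this patch */
};
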
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index d2838ce46eaf..014b70250abd 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -7,8 +7,10 @@
#include <linux/sched/mm.h>
#include <linux/gfp.h>
#include <drm/drm_buddy.h>
+#include <drm/drm_svm.h>
#include "xe_ttm_vram_mgr_types.h"
#include "xe_device_types.h"
+#include "xe_migrate.h"
#include "xe_device.h"
#include "xe_tile.h"
#include "xe_svm.h"
@@ -154,3 +156,49 @@ void* xe_svm_mem_region_to_pagemap_owner(struct drm_mem_region *mr)
return tile->xe;
}
+
+/**
+ * xe_svm_migrate() - migrate between two migration vectors
+ * @src_vec: source migration vector
+ * @dst_vec: destination migration vector
+ *
+ * For now this function only supports migration between system memory
+ * and device memory. It can be extended to support migration between
+ * two device memory regions.
+ *
+ * FIXME: right now this requires the vram dpa to be physically contiguous.
+ * This is guaranteed by the DRM_BUDDY_CONTIGUOUS_ALLOCATION flag in
+ * xe_svm_alloc_pages. In the future we can support migration with
+ * non-contiguous vram. This requires a change to xe_migrate_vram to
+ * map non-contiguous vram to a contiguous device virtual address
+ * space (i.e. not using the identity vram mapping of the migrate vm).
+ */
+struct dma_fence *xe_svm_migrate(struct migrate_vec *src_vec,
+				 struct migrate_vec *dst_vec)
+{
+	unsigned long npages = src_vec->npages;
+	struct xe_mem_region *xe_mr;
+	struct drm_mem_region *mr;
+	bool dst_vram = false;
+	struct xe_tile *tile;
+	dma_addr_t *sram_addr;
+	u64 vram_addr;
+
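+	/* Whichever vector has a drm_mem_region attached is the vram side. */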
+	if (dst_vec->mr) {
+		mr = dst_vec->mr;
+		dst_vram = true;
+		vram_addr = dst_vec->addr_vec[0].dpa;
+		sram_addr = &src_vec->addr_vec[0].dma_addr;
+	} else {
+		mr = src_vec->mr;
+		vram_addr = src_vec->addr_vec[0].dpa;
+		sram_addr = &dst_vec->addr_vec[0].dma_addr;
+	}
+
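+	/* Use the migrate context of the tile that owns the vram region. */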
+	xe_mr = container_of(mr, struct xe_mem_region, drm_mr);
+	tile = xe_mem_region_to_tile(xe_mr);
+
+	return xe_migrate_vram(tile->migrate, npages, sram_addr, vram_addr, dst_vram);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index b70551d12578..1b891b2a7587 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -8,10 +8,13 @@
struct drm_mem_region;
struct page;
+struct migrate_vec;
int xe_svm_alloc_pages(struct drm_mem_region *mr,
unsigned long npages, unsigned long *pfn);
void xe_svm_free_page(struct page *page);
void* xe_svm_mem_region_to_pagemap_owner(struct drm_mem_region *mr);
+struct dma_fence *xe_svm_migrate(struct migrate_vec *src_vec,
+				 struct migrate_vec *dst_vec);
#endif
--
2.26.3