[PATCH v4 29/33] drm/xe: Basic SVM BO eviction
Matthew Brost
matthew.brost at intel.com
Wed Jan 29 19:52:08 UTC 2025
Wire xe_bo_move to GPU SVM migration via new helper xe_svm_bo_evict.
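
For reviewers new to this path, below is a rough, self-contained model of
the flow this patch wires up. It is a sketch only, not driver code: every
type and helper in it is a simplified stand-in, and only the names
XE_BO_FLAG_CPU_ADDR_MIRROR and xe_svm_bo_evict() come from the actual
change.

/* Rough model only -- not driver code. Types and values are stand-ins. */
#include <stdio.h>

#define XE_BO_FLAG_CPU_ADDR_MIRROR (1u << 0)	/* stand-in bit, not the real value */

enum placement { PL_SYSTEM, PL_TT, PL_VRAM };	/* stand-ins for XE_PL_* */

struct fake_bo {
	unsigned int flags;
};

/* Stand-in for xe_svm_bo_evict(); the real helper calls
 * drm_gpusvm_evict_to_ram() on the BO's devmem allocation and only
 * returns once all device pages are evicted.
 */
static int xe_svm_bo_evict(struct fake_bo *bo)
{
	(void)bo;
	printf("SVM: evict all device pages to system RAM\n");
	return 0;
}

/* Models the new branch in xe_bo_move(): a CPU-address-mirror BO being
 * moved to system memory is handed to the SVM layer instead of the
 * normal TTM copy path.
 */
static int fake_bo_move(struct fake_bo *bo, enum placement new_mem)
{
	if ((bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) && new_mem == PL_SYSTEM)
		return xe_svm_bo_evict(bo);

	printf("TTM: regular move path\n");
	return 0;
}

int main(void)
{
	struct fake_bo bo = { .flags = XE_BO_FLAG_CPU_ADDR_MIRROR };

	return fake_bo_move(&bo, PL_SYSTEM);
}
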
v2:
 - Use xe_svm_bo_evict
 - Drop bo->range
v3:
 - Kernel doc (Thomas)
v4:
 - Add missing xe_bo.c code

Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
 drivers/gpu/drm/xe/xe_bo.c  | 19 +++++++++++++++++++
 drivers/gpu/drm/xe/xe_svm.c | 15 ++++++++++++++-
 drivers/gpu/drm/xe/xe_svm.h |  3 +++
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 20c96709e267..657687ee70d0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -255,6 +255,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 static void xe_evict_flags(struct ttm_buffer_object *tbo,
 			   struct ttm_placement *placement)
 {
+	struct xe_bo *bo;
+
 	if (!xe_bo_is_xe_bo(tbo)) {
 		/* Don't handle scatter gather BOs */
 		if (tbo->type == ttm_bo_type_sg) {
@@ -266,6 +268,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
 		return;
 	}
 
+	bo = ttm_to_xe_bo(tbo);
+	if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
+		*placement = sys_placement;
+		return;
+	}
+
 	/*
 	 * For xe, sg bos that are evicted to system just triggers a
 	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -710,6 +718,17 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		goto out;
 	}
 
+	if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
+	    new_mem->mem_type == XE_PL_SYSTEM) {
+		ret = xe_svm_bo_evict(bo);
+		if (!ret) {
+			drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+			ttm_bo_move_null(ttm_bo, new_mem);
+		}
+
+		goto out;
+	}
+
 	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
 		ttm_bo_move_null(ttm_bo, new_mem);
 		goto out;
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index fc030855d078..dafc5061eb42 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -768,6 +768,20 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
 }
 
+/**
+ * xe_svm_bo_evict() - SVM evict BO to system memory
+ * @bo: BO to evict
+ *
+ * Evict a BO to system memory via SVM. The GPU SVM layer ensures all
+ * device pages are evicted before returning.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+	return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
 static struct drm_pagemap_dma_addr
 xe_drm_pagemap_map_dma(struct drm_pagemap *dpagemap,
@@ -795,7 +809,6 @@ static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
 	.map_dma = xe_drm_pagemap_map_dma,
 };
 
->>>>>>> 133db8ade5f0 (drm/xe: Add drm_pagemap ops to SVM)
 /**
  * xe_devm_add: Remap and provide memmap backing for device memory
  * @tile: tile that the memory region belongs to
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 4c2576162c39..77dec5aae0ee 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -11,6 +11,7 @@
 
 #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
 
+struct xe_bo;
 struct xe_mem_region;
 struct xe_tile;
 struct xe_vm;
@@ -56,6 +57,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 
 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
 
+int xe_svm_bo_evict(struct xe_bo *bo);
+
 static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 {
 	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
--
2.34.1