[PATCH v2 11/32] drm/xe/svm: Add function to determine if range needs VRAM migration
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Mon Apr 7 10:16:58 UTC 2025
xe_svm_range_needs_migrate_to_vram() determines whether a range needs to be
migrated to VRAM or not; on a pagefault, migration is attempted at least once.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 49 +++++++++++++++++++++++++++++++++++--
drivers/gpu/drm/xe/xe_svm.h | 10 ++++++++
2 files changed, 57 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 8cd35553a927..f4ae3feaf9d3 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -709,6 +709,51 @@ int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
}
#endif
+static bool supports_4K_migration(struct xe_device *xe)
+{
+ if (xe->info.platform == XE_BATTLEMAGE)
+ return true;
+
+ return false;
+}
+
+/**
+ * xe_svm_range_needs_migrate_to_vram() - Determine if SVM range needs migration to VRAM
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @region: default placement for range
+ *
+ * Return: True if the range needs migration and migration is supported, false otherwise
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ u32 region)
+{
+ struct xe_vm *vm = range_to_vm(&range->base);
+ u64 range_size = xe_svm_range_size(range);
+ bool needs_migrate = false;
+
+ if (!range->base.flags.migrate_devmem)
+ return false;
+
+ needs_migrate = region;
+
+ if (needs_migrate && !IS_DGFX(vm->xe)) {
+ drm_warn(&vm->xe->drm, "Platform doesn't support VRAM\n");
+ return false;
+ }
+
+ if (needs_migrate && xe_svm_range_in_vram(range)) {
+ drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+ return false;
+ }
+
+ if (needs_migrate && range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+ drm_warn(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+ return false;
+ }
+
+ return needs_migrate;
+}
/**
* xe_svm_handle_pagefault() - SVM handle page fault
@@ -763,8 +808,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
range_debug(range, "PAGE FAULT");
/* XXX: Add migration policy, for now migrate range once */
- if (!range->skip_migrate && range->base.flags.migrate_devmem &&
- xe_svm_range_size(range) >= SZ_64K) {
+ if (!range->skip_migrate &&
+ xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
range->skip_migrate = true;
err = xe_svm_alloc_vram(vm, tile, range, &ctx);
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 9c4c3aeacc6c..d5be8229ca7e 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -95,6 +95,9 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
struct drm_gpusvm_ctx *ctx);
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ u32 region);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -281,6 +284,13 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
return 0;
}
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ u32 region)
+{
+ return false;
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
--
2.34.1
More information about the Intel-xe
mailing list