[PATCH] drm/xe: Thread prefetch of SVM ranges
Matthew Brost
matthew.brost at intel.com
Wed May 28 17:27:25 UTC 2025
The migrate_vma_* functions are very CPU-intensive, so prefetching of
SVM ranges is bound by the CPU rather than by the paging copy engine's
bandwidth. To speed up prefetching of SVM ranges, the step that calls
migrate_vma_* is now threaded, reusing the existing page-fault work
queue.
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
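[Editor's note: below is a minimal, illustrative sketch (not the driver code) of
the fan-out/flush pattern the patch uses: queue one work item per range, then
flush each item and collect the first error. All demo_* names are hypothetical,
and the sketch queues onto system_unbound_wq, whereas the patch reuses the GT
page-fault workqueue (tile->primary_gt->usm.pf_wq).]

    #include <linux/slab.h>
    #include <linux/workqueue.h>
    #include <linux/xarray.h>

    struct demo_work {
    	struct work_struct work;
    	unsigned long range_id;	/* stands in for an SVM range */
    	int err;
    };

    /* Stand-in for the CPU-heavy migrate_vma_* / get-pages step. */
    static int demo_process_range(unsigned long range_id)
    {
    	return 0;
    }

    static void demo_work_func(struct work_struct *w)
    {
    	struct demo_work *dw = container_of(w, struct demo_work, work);

    	dw->err = demo_process_range(dw->range_id);
    }

    static int demo_prefetch(unsigned long nr_ranges)
    {
    	struct xarray pending;
    	struct demo_work *dw;
    	unsigned long i;
    	int err = 0;
    	u32 id;

    	xa_init_flags(&pending, XA_FLAGS_ALLOC);

    	/* Fan out: one work item per range. */
    	for (i = 0; i < nr_ranges; i++) {
    		dw = kmalloc(sizeof(*dw), GFP_KERNEL);
    		if (!dw || xa_alloc(&pending, &id, dw, xa_limit_32b, GFP_KERNEL)) {
    			kfree(dw);
    			break;	/* flush whatever is already queued */
    		}

    		dw->range_id = i;
    		dw->err = 0;
    		INIT_WORK(&dw->work, demo_work_func);
    		queue_work(system_unbound_wq, &dw->work);
    	}

    	/* Wait for all queued items and report the first error seen. */
    	xa_for_each(&pending, i, dw) {
    		flush_work(&dw->work);
    		if (dw->err && !err)
    			err = dw->err;
    		kfree(dw);
    	}
    	xa_destroy(&pending);

    	return err;
    }

Note that flush_work() waits only on the items queued here, so unrelated work on
a shared queue is not waited on; the patch below follows the same per-item flush.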
drivers/gpu/drm/xe/xe_vm.c | 111 +++++++++++++++++++++++++++----------
1 file changed, 83 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5a978da411b0..18e5a36c6c21 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2878,53 +2878,108 @@ static int check_ufence(struct xe_vma *vma)
 	return 0;
 }
-static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
+struct prefetch_thread {
+	struct work_struct work;
+	struct drm_gpusvm_ctx *ctx;
+	struct xe_vma *vma;
+	struct xe_svm_range *svm_range;
+	u32 region;
+	int err;
+};
+
+static void prefetch_work_func(struct work_struct *w)
 {
-	bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
-	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+	struct prefetch_thread *thread =
+		container_of(w, struct prefetch_thread, work);
+	struct xe_vma *vma = thread->vma;
+	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_svm_range *svm_range = thread->svm_range;
+	u32 region = thread->region;
+	struct xe_tile *tile =
+		&vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
 	int err = 0;
-	struct xe_svm_range *svm_range;
+	if (!region) {
+		xe_svm_range_migrate_to_smem(vm, svm_range);
+	} else if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
+		err = xe_svm_alloc_vram(vm, tile, svm_range, thread->ctx);
+		if (err) {
+			drm_dbg(&vm->xe->drm,
+				"VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
+				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+			thread->err = -ENODATA;
+			return;
+		}
+		xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
+	}
+
+	err = xe_svm_range_get_pages(vm, svm_range, thread->ctx);
+	if (err) {
+		if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
+			err = -ENODATA;
+		drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
+			vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+		thread->err = err;
+		return;
+	}
+
+	xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
+}
+
+static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
+{
+	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+	u32 j, region = op->prefetch_range.region;
 	struct drm_gpusvm_ctx ctx = {};
-	struct xe_tile *tile;
+	struct prefetch_thread *thread;
+	struct xe_svm_range *svm_range;
+	struct xarray prefetches;
+	struct xe_tile *tile =
+		&vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
 	unsigned long i;
-	u32 region;
+	bool devmem_possible = IS_DGFX(vm->xe) &&
+		IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
+	int err = 0;
 	if (!xe_vma_is_cpu_addr_mirror(vma))
 		return 0;
-	region = op->prefetch_range.region;
+	xa_init_flags(&prefetches, XA_FLAGS_ALLOC);
 	ctx.read_only = xe_vma_read_only(vma);
 	ctx.devmem_possible = devmem_possible;
 	ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
-	/* TODO: Threading the migration */
 	xa_for_each(&op->prefetch_range.range, i, svm_range) {
-		if (!region)
-			xe_svm_range_migrate_to_smem(vm, svm_range);
+		thread = kmalloc(sizeof(*thread), GFP_KERNEL);
+		if (!thread)
+			goto wait_threads;
-		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
-			tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
-			err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
-			if (err) {
-				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
-					vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
-				return -ENODATA;
-			}
-			xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
-		}
-
-		err = xe_svm_range_get_pages(vm, svm_range, &ctx);
+		err = xa_alloc(&prefetches, &j, thread, xa_limit_32b,
+			       GFP_KERNEL);
 		if (err) {
-			if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
-				err = -ENODATA;
-			drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
-				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
-			return err;
+			kfree(thread);
+			goto wait_threads;
 		}
-		xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
+
+		INIT_WORK(&thread->work, prefetch_work_func);
+		thread->ctx = &ctx;
+		thread->vma = vma;
+		thread->svm_range = svm_range;
+		thread->region = region;
+		thread->err = 0;
+
+		queue_work(tile->primary_gt->usm.pf_wq, &thread->work);
+	}
+
+wait_threads:
+	xa_for_each(&prefetches, i, thread) {
+		flush_work(&thread->work);
+		if (thread->err && (!err || err == -ENODATA))
+			err = thread->err;
+		kfree(thread);
 	}
+	xa_destroy(&prefetches);
 	return err;
 }
--
2.34.1