[CI 14/43] drm/svm: Migrate a range of hmmptr to vram
Oak Zeng
oak.zeng at intel.com
Wed Jun 12 02:25:36 UTC 2024
Introduce a helper function drm_svm_migrate_hmmptr_to_vram() to migrate
any sub-range of a hmmptr to vram. The range must be aligned to page
boundaries. This helper is meant to be called by drivers to migrate a
hmmptr, or a sub-range of it, to vram.
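A rough usage sketch follows (hypothetical driver-side code, not part of
this patch; the gpuvm, mr, hmmptr, mm, drm, addr and size handles are
assumed to come from driver-specific setup):

	/*
	 * Hypothetical example: migrate a page-aligned sub-range of a
	 * hmmptr to a registered vram region while holding mmap_read_lock.
	 */
	mmap_read_lock(mm);
	err = drm_svm_migrate_hmmptr_to_vram(gpuvm, mr, hmmptr,
					     ALIGN_DOWN(addr, PAGE_SIZE),
					     ALIGN(addr + size, PAGE_SIZE));
	mmap_read_unlock(mm);
	/* Migration is best effort; on failure the range stays in system memory. */
	if (err)
		drm_dbg(drm, "hmmptr migration to vram failed: %d\n", err);
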
Cc: Daniel Vetter <daniel.vetter at intel.com>
Cc: Dave Airlie <airlied at redhat.com>
Cc: Jason Gunthorpe <jgg at nvidia.com>
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Cc: Christian König <christian.koenig at amd.com>
Cc: Felix Kuehling <felix.kuehling at amd.com>
Cc: Brian Welty <brian.welty at intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Cc: <dri-devel at lists.freedesktop.org>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
drivers/gpu/drm/drm_svm.c | 121 ++++++++++++++++++++++++++++++++++++++
include/drm/drm_svm.h | 3 +
2 files changed, 124 insertions(+)
diff --git a/drivers/gpu/drm/drm_svm.c b/drivers/gpu/drm/drm_svm.c
index ee6d932f434f..0a79b7800400 100644
--- a/drivers/gpu/drm/drm_svm.c
+++ b/drivers/gpu/drm/drm_svm.c
@@ -624,3 +624,124 @@ int drm_svm_register_mem_region(const struct drm_device *drm, struct drm_mem_reg
return 0;
}
EXPORT_SYMBOL_GPL(drm_svm_register_mem_region);
+
+static void __drm_svm_init_device_pages(unsigned long *pfn, unsigned long npages)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < npages; i++) {
+		page = pfn_to_page(pfn[i]);
+		zone_device_page_init(page);
+		pfn[i] = migrate_pfn(pfn[i]);
+	}
+}
+
+/**
+ * drm_svm_migrate_hmmptr_to_vram() - migrate a sub-range of a hmmptr to vram
+ * @vm: the vm that the hmmptr belongs to
+ * @mr: the destination memory region we want to migrate to
+ * @hmmptr: the hmmptr to migrate
+ * @start: start (CPU virtual address, inclusive) of the range to migrate
+ * @end: end (CPU virtual address, exclusive) of the range to migrate
+ *
+ * Must be called with mmap_read_lock held.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int drm_svm_migrate_hmmptr_to_vram(struct drm_gpuvm *vm,
+				   struct drm_mem_region *mr,
+				   struct drm_hmmptr *hmmptr, unsigned long start, unsigned long end)
+{
+	struct drm_device *drm = mr->mr_ops.drm_mem_region_get_device(mr);
+	struct mm_struct *mm = vm->mm;
+	unsigned long npages = __npages_in_range(start, end);
+	struct vm_area_struct *vas;
+	struct migrate_vma migrate = {
+		.start = ALIGN_DOWN(start, PAGE_SIZE),
+		.end = ALIGN(end, PAGE_SIZE),
+		.pgmap_owner = mr->mr_ops.drm_mem_region_pagemap_owner(mr),
+		.flags = MIGRATE_VMA_SELECT_SYSTEM,
+	};
+	struct device *dev = drm->dev;
+	struct dma_fence *fence;
+	struct migrate_vec *src;
+	struct migrate_vec *dst;
+	int ret = 0;
+	void *buf;
+
+	mmap_assert_locked(mm);
+
+	BUG_ON(start < __hmmptr_cpu_start(hmmptr));
+	BUG_ON(end > __hmmptr_cpu_end(hmmptr));
+
+	vas = find_vma_intersection(mm, start, end);
+	if (!vas)
+		return -ENOENT;
+
+	migrate.vma = vas;
+	buf = kvcalloc(npages, 2 * sizeof(*migrate.src), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	migrate.src = buf;
+	migrate.dst = migrate.src + npages;
+	ret = migrate_vma_setup(&migrate);
+	if (ret) {
+		drm_warn(drm, "vma setup returned %d for range [0x%lx - 0x%lx]\n",
+			 ret, start, end);
+		goto free_buf;
+	}
+
+	/*
+	 * Partial migration is expected and normal. Print a message for now;
+	 * once this behavior is verified, delete this warning.
+	 */
+	if (migrate.cpages != npages)
+		drm_warn(drm, "Partial migration for range [0x%lx - 0x%lx], range is %ld pages, migrated only %ld pages\n",
+			 start, end, npages, migrate.cpages);
+
+	ret = mr->mr_ops.drm_mem_region_alloc_pages(mr, migrate.cpages, migrate.dst);
+	if (ret)
+		goto migrate_finalize;
+
+	__drm_svm_init_device_pages(migrate.dst, migrate.cpages);
+
+	src = __generate_migrate_vec_sram(dev, migrate.src, true, npages);
+	if (!src) {
+		ret = -EFAULT;
+		goto free_device_pages;
+	}
+
+	dst = __generate_migrate_vec_vram(migrate.dst, false, migrate.cpages);
+	if (!dst) {
+		ret = -EFAULT;
+		goto free_migrate_src;
+	}
+
+	fence = mr->mr_ops.drm_mem_region_migrate(src, dst);
+	if (IS_ERR(fence)) {
+		ret = -EIO;
+		goto free_migrate_dst;
+	}
+	dma_fence_wait(fence, false);
+	dma_fence_put(fence);
+
+	migrate_vma_pages(&migrate);
+
+free_migrate_dst:
+	__free_migrate_vec_vram(dst);
+free_migrate_src:
+	__free_migrate_vec_sram(dev, src, true);
+free_device_pages:
+	if (ret)
+		__drm_svm_free_pages(migrate.dst, migrate.cpages);
+migrate_finalize:
+	if (ret)
+		memset(migrate.dst, 0, sizeof(*migrate.dst) * migrate.cpages);
+	migrate_vma_finalize(&migrate);
+free_buf:
+	kvfree(buf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_svm_migrate_hmmptr_to_vram);
diff --git a/include/drm/drm_svm.h b/include/drm/drm_svm.h
index cb1edb6a993e..805f066ef3ff 100644
--- a/include/drm/drm_svm.h
+++ b/include/drm/drm_svm.h
@@ -223,4 +223,7 @@ void drm_svm_hmmptr_map_dma_pages(struct drm_hmmptr *hmmptr, u64 page_idx, u64 n
void drm_svm_hmmptr_unmap_dma_pages(struct drm_hmmptr *hmmptr, u64 page_idx, u64 npages);
int drm_svm_hmmptr_populate(struct drm_hmmptr *hmmptr, void *owner, u64 start, u64 end,
bool write, bool is_mmap_locked);
+int drm_svm_migrate_hmmptr_to_vram(struct drm_gpuvm *vm,
+				   struct drm_mem_region *mr,
+				   struct drm_hmmptr *hmmptr, unsigned long start, unsigned long end);
#endif
--
2.26.3