[CI v3 14/26] drm/svm: Migrate a range of hmmptr to vram
Oak Zeng
oak.zeng at intel.com
Wed May 29 01:19:12 UTC 2024
Introduce a helper function drm_svm_migrate_hmmptr_to_vram to migrate
any sub-range of a hmmptr to vram. The range has to be page aligned.
This is supposed to be called by the driver to migrate a hmmptr to vram.
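A minimal driver-side usage sketch (gpuvm, vram_mr and hmmptr stand for
whatever objects the driver already tracks; vram_mr as a name is
illustrative only, not part of this patch):

  struct drm_gpuva *gpuva = hmmptr->get_gpuva(hmmptr);
  int err;

  /* the helper must be called with mmap_read_lock held */
  mmap_read_lock(gpuvm->mm);
  err = drm_svm_migrate_hmmptr_to_vram(gpuvm, vram_mr, hmmptr,
                                       GPUVA_START(gpuva),
                                       GPUVA_END(gpuva));
  mmap_read_unlock(gpuvm->mm);
  if (err)
    /* migration failed; the range stays in system memory */
    return err;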
Cc: Daniel Vetter <daniel.vetter at intel.com>
Cc: Dave Airlie <airlied at redhat.com>
Cc: Jason Gunthorpe <jgg at nvidia.com>
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Cc: Christian König <christian.koenig at amd.com>
Cc: Felix Kuehling <felix.kuehling at amd.com>
Cc: Brian Welty <brian.welty at intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Cc: <dri-devel at lists.freedesktop.org>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
drivers/gpu/drm/drm_svm.c | 122 ++++++++++++++++++++++++++++++++++++++
include/drm/drm_svm.h | 3 +
2 files changed, 125 insertions(+)
diff --git a/drivers/gpu/drm/drm_svm.c b/drivers/gpu/drm/drm_svm.c
index e18656bbd1fd..e93b9640913e 100644
--- a/drivers/gpu/drm/drm_svm.c
+++ b/drivers/gpu/drm/drm_svm.c
@@ -526,3 +526,125 @@ int drm_svm_register_mem_region(const struct drm_device *drm, struct drm_mem_reg
return 0;
}
EXPORT_SYMBOL_GPL(drm_svm_register_mem_region);
+
+static void __drm_svm_init_device_pages(unsigned long *pfn, unsigned long npages)
+{
+ struct page *page;
+ unsigned long i;
+
+ for (i = 0; i < npages; i++) {
+ page = pfn_to_page(pfn[i]);
+ zone_device_page_init(page);
+ pfn[i] = migrate_pfn(pfn[i]);
+ }
+}
+
+/**
+ * drm_svm_migrate_hmmptr_to_vram() - migrate a sub-range of a hmmptr to vram
+ *
+ * @vm: the vm that the hmmptr belongs to
+ * @mr: the destination memory region we want to migrate to
+ * @hmmptr: the hmmptr to migrate
+ * @start: start (virtual address, inclusive) of the range to migrate
+ * @end: end (virtual address, exclusive) of the range to migrate
+ *
+ * Must be called with mmap_read_lock held.
+ * Returns: negative errno on failure, 0 on success
+ */
+int drm_svm_migrate_hmmptr_to_vram(struct drm_gpuvm *vm,
+ struct drm_mem_region *mr,
+ struct drm_hmmptr *hmmptr, unsigned long start, unsigned long end)
+{
+ struct drm_device *drm = mr->mr_ops.drm_mem_region_get_device(mr);
+ struct drm_gpuva *gpuva = hmmptr->get_gpuva(hmmptr);
+ struct mm_struct *mm = vm->mm;
+ unsigned long npages = __npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct migrate_vma migrate = {
+ .start = ALIGN_DOWN(start, PAGE_SIZE),
+ .end = ALIGN(end, PAGE_SIZE),
+ .pgmap_owner = mr->mr_ops.drm_mem_region_pagemap_owner(mr),
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ struct device *dev = drm->dev;
+ struct dma_fence *fence;
+ struct migrate_vec *src;
+ struct migrate_vec *dst;
+ int ret = 0;
+ void *buf;
+
+ mmap_assert_locked(mm);
+
+ BUG_ON(start < GPUVA_START(gpuva));
+ BUG_ON(end > GPUVA_END(gpuva));
+
+ vas = find_vma_intersection(mm, start, end);
+ if (!vas)
+ return -ENOENT;
+
+ migrate.vma = vas;
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+ ret = migrate_vma_setup(&migrate);
+ if (ret) {
+ drm_warn(drm, "vma setup returned %d for range [%lx - %lx]\n",
+ ret, start, end);
+ goto free_buf;
+ }
+
+ /*
+ * Partial migration is normal. Print a message for now.
+ * Once this behavior is verified, delete this warning.
+ */
+ if (migrate.cpages != npages)
+ drm_warn(drm, "Partial migration for range [%lx - %lx], range is %ld pages, migrate only %ld pages\n",
+ start, end, npages, migrate.cpages);
+
+ ret = mr->mr_ops.drm_mem_region_alloc_pages(mr, migrate.cpages, migrate.dst);
+ if (ret) {
+ memset(migrate.dst, 0, npages * sizeof(*migrate.dst));
+ goto migrate_pages;
+ }
+
+ __drm_svm_init_device_pages(migrate.dst, migrate.cpages);
+
+ src = __generate_migrate_vec_sram(dev, migrate.src, true, npages);
+ if (!src) {
+ ret = -ENOMEM;
+ goto free_device_pages;
+ }
+
+ dst = __generate_migrate_vec_vram(migrate.dst, false, migrate.cpages);
+ if (!dst) {
+ ret = -ENOMEM;
+ goto free_migrate_src;
+ }
+
+ fence = mr->mr_ops.drm_mem_region_migrate(src, dst);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto free_migrate_dst;
+ }
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+free_migrate_dst:
+ __free_migrate_vec_vram(dst);
+free_migrate_src:
+ __free_migrate_vec_sram(dev, src, true);
+
+free_device_pages:
+ if (ret)
+ __drm_svm_free_pages(migrate.dst, migrate.cpages);
+migrate_pages:
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+free_buf:
+ kvfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_svm_migrate_hmmptr_to_vram);
diff --git a/include/drm/drm_svm.h b/include/drm/drm_svm.h
index 9d54475d8b5b..33709d8c5f35 100644
--- a/include/drm/drm_svm.h
+++ b/include/drm/drm_svm.h
@@ -210,3 +210,6 @@ int drm_svm_hmmptr_init(struct drm_hmmptr *hmmptr,
void drm_svm_hmmptr_release(struct drm_hmmptr *hmmptr);
int drm_svm_hmmptr_populate(struct drm_hmmptr *hmmptr, void *owner,
u64 start, u64 end, bool write);
+int drm_svm_migrate_hmmptr_to_vram(struct drm_gpuvm *vm,
+ struct drm_mem_region *mr,
+ struct drm_hmmptr *hmmptr, unsigned long start, unsigned long end);
--
2.26.3