[PATCH 1/2] drm/xe: keep list of system pages in xe_userptr
Andrzej Hajda <andrzej.hajda@intel.com>
Mon Oct 28 16:19:26 UTC 2024
When accessing data provided by a userptr inside the driver, we need a
list of the corresponding pages. This list is created by
xe_hmm_userptr_populate_range() and stored in the xe_userptr.sgt sg
table. Since we are not allowed to get the pages back out of an sg
list, let's store them in a separate field as well.
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
drivers/gpu/drm/xe/xe_hmm.c | 52 ++++++++++++++++----------------
drivers/gpu/drm/xe/xe_vm_types.h | 2 ++
2 files changed, 28 insertions(+), 26 deletions(-)
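Note (not part of this patch): with userptr->pages populated, driver code
that needs to read data through the userptr can walk the array page by
page. A minimal sketch, assuming the caller guarantees the range is still
valid (e.g. under the usual userptr notifier protection); the helper name
xe_userptr_example_read() is hypothetical and only illustrates the intended
use of the new field:

static int xe_userptr_example_read(struct xe_userptr *userptr, void *dst,
				   u64 offset, u64 size)
{
	u64 done = 0;

	if (!userptr->pages)
		return -EINVAL;

	while (done < size) {
		u64 idx = (offset + done) >> PAGE_SHIFT;
		u64 off = offset_in_page(offset + done);
		u64 len = min_t(u64, size - done, PAGE_SIZE - off);
		/* temporarily map the system page and copy out of it */
		void *va = kmap_local_page(userptr->pages[idx]);

		memcpy(dst + done, va + off, len);
		kunmap_local(va);
		done += len;
	}

	return 0;
}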
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..6fc21e04ef2e 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -44,13 +44,13 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
}
/*
- * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
- * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
+ * xe_build_sg() - build a scatter gather table for the given physical pages
+ * and dma-map them. The dma-address is saved in the sg table
* and will be used to program GPU page table later.
*
* @xe: the xe device who will access the dma-address in sg table
- * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
- * has the pfn numbers of pages that back up this hmm address range.
+ * @pages: array of page pointers
+ * @npages: number of entries in @pages
* @st: pointer to the sg table.
* @write: whether we write to this range. This decides dma map direction
* for system pages. If write we map it bi-directional; otherwise
@@ -77,38 +77,22 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
*
* Returns 0 if successful; -ENOMEM if fails to allocate memory
*/
-static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+static int xe_build_sg(struct xe_device *xe, struct page **pages, u64 npages,
struct sg_table *st, bool write)
{
struct device *dev = xe->drm.dev;
- struct page **pages;
- u64 i, npages;
int ret;
- npages = xe_npages_in_range(range->start, range->end);
- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < npages; i++) {
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
- xe_assert(xe, !is_device_private_page(pages[i]));
- }
-
ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
xe_sg_segment_size(dev), GFP_KERNEL);
if (ret)
- goto free_pages;
+ return ret;
ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
+ if (ret)
sg_free_table(st);
- st = NULL;
- }
-free_pages:
- kvfree(pages);
return ret;
}
@@ -136,6 +120,8 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
sg_free_table(userptr->sg);
userptr->sg = NULL;
+ kvfree(userptr->pages);
+ userptr->pages = NULL;
}
/**
@@ -175,7 +161,7 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
struct hmm_range hmm_range;
bool write = !xe_vma_read_only(vma);
unsigned long notifier_seq;
- u64 npages;
+ u64 i, npages;
int ret;
userptr = &uvma->userptr;
@@ -238,9 +224,23 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto free_pfns;
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
- if (ret)
+ userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages), GFP_KERNEL);
+ if (!userptr->pages) {
+ ret = -ENOMEM;
+ goto free_pfns;
+ }
+
+ for (i = 0; i < npages; i++) {
+ userptr->pages[i] = hmm_pfn_to_page(hmm_range.hmm_pfns[i]);
+ xe_assert(vm->xe, !is_device_private_page(userptr->pages[i]));
+ }
+
+ ret = xe_build_sg(vm->xe, userptr->pages, npages, &userptr->sgt, write);
+ if (ret) {
+ kvfree(userptr->pages);
+ userptr->pages = NULL;
goto free_pfns;
+ }
xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 557b047ebdd7..f1ec3925dea0 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -53,6 +53,8 @@ struct xe_userptr {
* @notifier: MMU notifier for user pointer (invalidation call back)
*/
struct mmu_interval_notifier notifier;
+ /** @pages: array of page pointers backing this userptr range */
+ struct page **pages;
/** @sgt: storage for a scatter gather table */
struct sg_table sgt;
/** @sg: allocated scatter gather table */
--
2.34.1