[PATCH 2/2] drm/xe/eudebug: implement userptr_vma access

Andrzej Hajda andrzej.hajda at intel.com
Mon Oct 28 16:19:27 UTC 2024


The debugger needs to read and write the program's vmas, including
userptr_vma. Since hmm_range_fault is used to pin userptr vmas, it
is possible to map those vmas from the debugger context.
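
The access itself boils down to walking the pinned page array and
copying through short-lived kernel mappings. A simplified sketch of
the copy loop added below (notifier locking and the repin path
omitted):

	page = &up->pages[offset >> PAGE_SHIFT];
	offset &= ~PAGE_MASK;
	while (left > 0) {
		u64 cur_len = min(PAGE_SIZE - offset, left);
		void *ptr = kmap_local_page(*page++);

		if (write)
			memcpy(ptr + offset, buf, cur_len);
		else
			memcpy(buf, ptr + offset, cur_len);
		kunmap_local(ptr);
		buf += cur_len;
		left -= cur_len;
		offset = 0;
	}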

v2: pin pages vs notifier, move to vm.c (Matthew)
v3: - iterate over system pages instead of DMA addresses; fixes access
      with IOMMU enabled
    - s/xe_uvma_access/xe_vm_uvma_access/ (Matt)
v4: use xe_userptr->pages, instead of sg to access pages (Matt)

Signed-off-by: Andrzej Hajda <andrzej.hajda at intel.com>
---
 drivers/gpu/drm/xe/xe_eudebug.c |  2 +-
 drivers/gpu/drm/xe/xe_vm.c      | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_vm.h      |  3 ++
 3 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_eudebug.c b/drivers/gpu/drm/xe/xe_eudebug.c
index 1bff0a2cfaa1..569c8d0b2ef8 100644
--- a/drivers/gpu/drm/xe/xe_eudebug.c
+++ b/drivers/gpu/drm/xe/xe_eudebug.c
@@ -3049,7 +3049,7 @@ static int xe_eudebug_vma_access(struct xe_vma *vma, u64 offset,
 		return ret;
 	}
 
-	return -EINVAL;
+	return xe_vm_userptr_access(to_userptr_vma(vma), offset, buf, bytes, write);
 }
 
 static int xe_eudebug_vm_access(struct xe_vm *vm, u64 offset,
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e76a6df1eba1..8fd9eed41fe1 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3412,3 +3412,75 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
 	}
 	kvfree(snap);
 }
+
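+/**
+ * xe_vm_userptr_access() - Access the pages backing a userptr VMA
+ * @uvma: the userptr VMA to access
+ * @offset: byte offset into the VMA at which the access starts
+ * @buf: kernel buffer to copy to (read) or from (write)
+ * @len: number of bytes to copy
+ * @write: if true, copy from @buf into the VMA, otherwise copy out of it
+ *
+ * Repins the userptr pages if they have been invalidated since the
+ * last pin, then copies @len bytes between @buf and the pinned
+ * pages through temporary kernel mappings.
+ *
+ * Return: @len on success, negative error code on failure.
+ */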
+int xe_vm_userptr_access(struct xe_userptr_vma *uvma, u64 offset,
+			 void *buf, u64 len, bool write)
+{
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+	struct xe_userptr *up = &uvma->userptr;
+	struct page **page;
+	u64 left = len;
+	int ret = 0;
+
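+	/*
+	 * If the pages have been invalidated since the last pin, drop
+	 * the notifier lock, repin them and retry. On exit from the
+	 * loop the notifier lock is held and the pages are valid.
+	 */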
+	while (true) {
+		down_read(&vm->userptr.notifier_lock);
+		if (!xe_vma_userptr_check_repin(uvma))
+			break;
+
+		spin_lock(&vm->userptr.invalidated_lock);
+		list_del_init(&uvma->userptr.invalidate_link);
+		spin_unlock(&vm->userptr.invalidated_lock);
+
+		up_read(&vm->userptr.notifier_lock);
+		ret = xe_vma_userptr_pin_pages(uvma);
+		if (ret)
+			return ret;
+	}
+
+	if (!up->sg) {
+		ret = -EINVAL;
+		goto out_unlock_notifier;
+	}
+
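+	/* Copy page by page, bouncing through short-lived local mappings. */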
+	page = &up->pages[offset >> PAGE_SHIFT];
+	offset &= ~PAGE_MASK;
+	for (; left > 0; ++page) {
+		u64 cur_len = min(PAGE_SIZE - offset, left);
+		void *ptr = kmap_local_page(page[0]);
+
+		if (write)
+			memcpy(ptr + offset, buf, cur_len);
+		else
+			memcpy(buf, ptr + offset, cur_len);
+		kunmap_local(ptr);
+		buf += cur_len;
+		left -= cur_len;
+		offset = 0;
+	}
+
+	ret = len;
+
+out_unlock_notifier:
+	up_read(&vm->userptr.notifier_lock);
+	return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index c864dba35e1d..165eab494d59 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -281,3 +281,6 @@ struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+int xe_vm_userptr_access(struct xe_userptr_vma *uvma, u64 offset,
+			 void *buf, u64 len, bool write);
-- 
2.34.1


