[PATCH 15/21] drm/xe/eudebug: implement userptr_vma access
Mika Kuoppala
mika.kuoppala at linux.intel.com
Fri Jul 26 14:08:12 UTC 2024
From: Andrzej Hajda <andrzej.hajda at intel.com>
The debugger needs to read/write the debugged program's vmas,
including userptr vmas. Since hmm_range_fault is used to pin userptr
vmas, those vmas can also be mapped and accessed from the debugger
context.
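
The access path takes the userptr notifier lock in a non-invalidation
state using the generic mmu_interval_notifier read/retry pattern. As a
minimal sketch of that pattern (notifier and notifier_lock are
placeholder names, not the exact fields used by the patch):

	for (;;) {
		unsigned long seq = mmu_interval_read_begin(&notifier);

		down_read(&notifier_lock);
		if (!mmu_interval_read_retry(&notifier, seq))
			break;	/* no invalidation since seq: safe to map */
		up_read(&notifier_lock);
	}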
v2: kmap to kmap_local (Maciej)
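
kmap_local_page() mappings are CPU-local and short-lived, which is all
the copy loop needs. Per chunk, the access reduces to roughly the
following (a sketch; page, off and n stand in for the cursor state):

	void *ptr = kmap_local_page(page);

	if (write)
		memcpy(ptr + off, buf, n);
	else
		memcpy(buf, ptr + off, n);
	kunmap_local(ptr);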
Signed-off-by: Andrzej Hajda <andrzej.hajda at intel.com>
Signed-off-by: Maciej Patelczyk <maciej.patelczyk at intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
---
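The pinned pages are walked with the existing xe_res_cursor helpers
over the userptr sg table; schematically (a sketch of the walk, chunk
is a placeholder for the per-iteration copy size):

	struct xe_res_cursor cur;
	u64 chunk = 0;

	for (xe_res_first_sg(up->sg, offset, len, &cur); cur.remaining;
	     xe_res_next(&cur, chunk)) {
		chunk = min(cur.size, cur.remaining);
		/* kmap sg_page(cur.sgl), copy chunk bytes at cur.start */
	}
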
drivers/gpu/drm/xe/xe_eudebug.c | 56 ++++++++++++++++++++++++++++++++-
1 file changed, 55 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_eudebug.c b/drivers/gpu/drm/xe/xe_eudebug.c
index aa383accc468..947331c19f43 100644
--- a/drivers/gpu/drm/xe/xe_eudebug.c
+++ b/drivers/gpu/drm/xe/xe_eudebug.c
@@ -33,6 +33,7 @@
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pm.h"
+#include "xe_res_cursor.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_vm.h"
@@ -2852,6 +2853,58 @@ static void discovery_work_fn(struct work_struct *work)
 	xe_eudebug_put(d);
 }
 
+static int xe_eudebug_uvma_access(struct xe_userptr_vma *uvma, u64 offset,
+				  void *buf, u64 len, bool write)
+{
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+	struct xe_userptr *up = &uvma->userptr;
+	struct xe_res_cursor cur = {};
+	int cur_len, ret = 0;
+
+	/* lock notifier in non-invalidation state */
+	for (unsigned long nseq = uvma->userptr.notifier_seq; true;
+	     nseq = mmu_interval_read_begin(&uvma->userptr.notifier)) {
+		down_read(&vm->userptr.notifier_lock);
+		if (!mmu_interval_read_retry(&uvma->userptr.notifier, nseq))
+			break;
+		up_read(&vm->userptr.notifier_lock);
+	}
+
+	/* re-pin if necessary */
+	if (xe_vma_userptr_check_repin(uvma)) {
+		spin_lock(&vm->userptr.invalidated_lock);
+		list_del_init(&uvma->userptr.invalidate_link);
+		spin_unlock(&vm->userptr.invalidated_lock);
+
+		ret = xe_vma_userptr_pin_pages(uvma);
+		if (ret)
+			goto out_unlock_notifier;
+	}
+
+	if (!up->sg) {
+		ret = -EINVAL;
+		goto out_unlock_notifier;
+	}
+
+	for (xe_res_first_sg(up->sg, offset, len, &cur); cur.remaining;
+	     xe_res_next(&cur, cur_len)) {
+		void *ptr = kmap_local_page(sg_page(cur.sgl)) + cur.start;
+
+		cur_len = min(cur.size, cur.remaining);
+		if (write)
+			memcpy(ptr, buf, cur_len);
+		else
+			memcpy(buf, ptr, cur_len);
+		kunmap_local(ptr);
+		buf += cur_len;
+	}
+	ret = len;
+
+out_unlock_notifier:
+	up_read(&vm->userptr.notifier_lock);
+	return ret;
+}
+
 static int xe_eudebug_bovma_access(struct xe_bo *bo, u64 offset,
 				   void *buf, u64 len, bool write)
 {
@@ -2895,7 +2948,8 @@ static int xe_eudebug_vma_access(struct xe_vma *vma, u64 offset,
 	if (bo)
 		return xe_eudebug_bovma_access(bo, offset, buf, bytes, write);
 
-	return -EOPNOTSUPP;
+	return xe_eudebug_uvma_access(to_userptr_vma(vma), offset,
+				      buf, bytes, write);
 }
 
 static int xe_eudebug_vm_access(struct xe_vm *vm, u64 offset,
--
2.34.1