[PATCH 15/21] drm/xe/eudebug: implement userptr_vma access

Matthew Brost matthew.brost at intel.com
Sat Jul 27 01:45:38 UTC 2024


On Fri, Jul 26, 2024 at 06:50:24PM +0000, Matthew Brost wrote:
> On Fri, Jul 26, 2024 at 06:46:29PM +0000, Matthew Brost wrote:
> > On Fri, Jul 26, 2024 at 05:08:12PM +0300, Mika Kuoppala wrote:
> > > From: Andrzej Hajda <andrzej.hajda at intel.com>
> > > 
> > > The debugger needs to read/write a program's vmas, including userptr_vma.
> > > Since hmm_range_fault is used to pin userptr vmas, it is possible
> > > to map those vmas from the debugger context.
> > > 
> > > v2: kmap to kmap_local (Maciej)
> > > 
> > > Signed-off-by: Andrzej Hajda <andrzej.hajda at intel.com>
> > > Signed-off-by: Maciej Patelczyk <maciej.patelczyk at intel.com>
> > > Signed-off-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/xe_eudebug.c | 56 ++++++++++++++++++++++++++++++++-
> > >  1 file changed, 55 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/drivers/gpu/drm/xe/xe_eudebug.c b/drivers/gpu/drm/xe/xe_eudebug.c
> > > index aa383accc468..947331c19f43 100644
> > > --- a/drivers/gpu/drm/xe/xe_eudebug.c
> > > +++ b/drivers/gpu/drm/xe/xe_eudebug.c
> > > @@ -33,6 +33,7 @@
> > >  #include "xe_mmio.h"
> > >  #include "xe_module.h"
> > >  #include "xe_pm.h"
> > > +#include "xe_res_cursor.h"
> > >  #include "xe_rtp.h"
> > >  #include "xe_sched_job.h"
> > >  #include "xe_vm.h"
> > > @@ -2852,6 +2853,58 @@ static void discovery_work_fn(struct work_struct *work)
> > >  	xe_eudebug_put(d);
> > >  }
> > >  
> > > +static int xe_eudebug_uvma_access(struct xe_userptr_vma *uvma, u64 offset,
> > > +				  void *buf, u64 len, bool write)
> > > +{
> > > +	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
> > > +	struct xe_userptr *up = &uvma->userptr;
> > > +	struct xe_res_cursor cur = {};
> > > +	int cur_len, ret = 0;
> > > +
> > > +	/* lock notifier in non-invalidation state */
> > > +	for (unsigned long nseq = uvma->userptr.notifier_seq; true;
> > > +	     nseq = mmu_interval_read_begin(&uvma->userptr.notifier)) {
> > > +		down_read(&vm->userptr.notifier_lock);
> > > +		if (!mmu_interval_read_retry(&uvma->userptr.notifier, nseq))
> > > +			break;
> > > +		up_read(&vm->userptr.notifier_lock);
> > > +	}
> > > +
> > 
> > I don't think this will work without lockdep blowing up.
> > '&vm->userptr.notifier_lock' is taken in the MMU notifier, and the MMU
> > notifier is in the path of reclaim, so you cannot allocate memory
> > under this lock; xe_vma_userptr_pin_pages allocates memory.
> > 
> > I think you are going to need to pin the pages first, then take the
> > notifier_lock, recheck the seqno, retry on a miscompare, once the
> 
> Let me make 'on a miscompare' a bit more clear.
> 
> Drop the notifier lock and repin the pages again.
> 
> This is how the VM bind flow works to avoid memory allocations under the
> notifier lock.
> 
> Matt

Sorry for the triple reply... but with these two patches [1] [2] applied,
lockdep will complain about this issue immediately.

Matt

[1] https://patchwork.freedesktop.org/patch/606081/?series=136581&rev=1
[2] https://patchwork.freedesktop.org/series/136579/
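
To make the pin-first flow concrete, here is a rough sketch, modeled on the
VM bind path. Helper and field names are taken from the patch being reviewed
(xe_vma_userptr_pin_pages, xe_vma_userptr_check_repin, vm->userptr.notifier_lock);
treat this as an untested outline under those assumptions, not a drop-in fix:

static int xe_eudebug_uvma_access_sketch(struct xe_userptr_vma *uvma,
					 u64 offset, void *buf, u64 len,
					 bool write)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
	int ret;

retry:
	/*
	 * Pin outside the notifier lock: xe_vma_userptr_pin_pages() can
	 * allocate memory, and the notifier lock sits in the reclaim path.
	 */
	ret = xe_vma_userptr_pin_pages(uvma);
	if (ret)
		return ret;

	/* Take the lock, then check nothing invalidated the pages meanwhile. */
	down_read(&vm->userptr.notifier_lock);
	if (xe_vma_userptr_check_repin(uvma)) {
		/* Seqno miscompare: drop the lock and repin from scratch. */
		up_read(&vm->userptr.notifier_lock);
		goto retry;
	}

	/* Safe here: do the kmap_local + memcpy loop over up->sg as below. */

	up_read(&vm->userptr.notifier_lock);
	return len;
}

The key point is that the page pinning always runs before, never under, the
notifier lock, and the seqno recheck guarantees the pinned pages are still
the ones backing the vma when the copy happens.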

> 
> > compare passes it should be safe to write the userptr.
> > 
> > Matt
> > 
> > > +	/* re-pin if necessary */
> > > +	if (xe_vma_userptr_check_repin(uvma)) {
> > > +		spin_lock(&vm->userptr.invalidated_lock);
> > > +		list_del_init(&uvma->userptr.invalidate_link);
> > > +		spin_unlock(&vm->userptr.invalidated_lock);
> > > +
> > > +		ret = xe_vma_userptr_pin_pages(uvma);
> > > +		if (ret)
> > > +			goto out_unlock_notifier;
> > > +	}
> > > +
> > > +	if (!up->sg) {
> > > +		ret = -EINVAL;
> > > +		goto out_unlock_notifier;
> > > +	}
> > > +
> > > +	for (xe_res_first_sg(up->sg, offset, len, &cur); cur.remaining;
> > > +	     xe_res_next(&cur, cur_len)) {
> > > +		void *ptr = kmap_local_page(sg_page(cur.sgl)) + cur.start;
> > > +
> > > +		cur_len = min(cur.size, cur.remaining);
> > > +		if (write)
> > > +			memcpy(ptr, buf, cur_len);
> > > +		else
> > > +			memcpy(buf, ptr, cur_len);
> > > +		kunmap_local(ptr);
> > > +		buf += cur_len;
> > > +	}
> > > +	ret = len;
> > > +
> > > +out_unlock_notifier:
> > > +	up_read(&vm->userptr.notifier_lock);
> > > +	return ret;
> > > +}
> > > +
> > >  static int xe_eudebug_bovma_access(struct xe_bo *bo, u64 offset,
> > >  				   void *buf, u64 len, bool write)
> > >  {
> > > @@ -2895,7 +2948,8 @@ static int xe_eudebug_vma_access(struct xe_vma *vma, u64 offset,
> > >  	if (bo)
> > >  		return xe_eudebug_bovma_access(bo, offset, buf, bytes, write);
> > >  
> > > -	return -EOPNOTSUPP;
> > > +	return xe_eudebug_uvma_access(to_userptr_vma(vma), offset,
> > > +				      buf, bytes, write);
> > >  }
> > >  
> > >  static int xe_eudebug_vm_access(struct xe_vm *vm, u64 offset,
> > > -- 
> > > 2.34.1
> > > 

