[PATCH v5 3/6] drm/xe/xe_vm: Add per VM pagefault info

Cavitt, Jonathan jonathan.cavitt at intel.com
Fri Mar 7 22:05:41 UTC 2025


-----Original Message-----
From: Mun, Gwan-gyeong <gwan-gyeong.mun at intel.com> 
Sent: Friday, March 7, 2025 4:40 AM
To: Cavitt, Jonathan <jonathan.cavitt at intel.com>; intel-xe at lists.freedesktop.org
Cc: Gupta, saurabhg <saurabhg.gupta at intel.com>; Zuo, Alex <alex.zuo at intel.com>; joonas.lahtinen at linux.intel.com; Brost, Matthew <matthew.brost at intel.com>; Zhang, Jianxun <jianxun.zhang at intel.com>; Lin, Shuicheng <shuicheng.lin at intel.com>; dri-devel at lists.freedesktop.org
Subject: Re: [PATCH v5 3/6] drm/xe/xe_vm: Add per VM pagefault info
> 
> On 3/4/25 7:08 PM, Jonathan Cavitt wrote:
> > Add additional information to each VM so it can report up to the last
> > 50 seen pagefaults.  Only failed pagefaults are saved this way, as
> > successful pagefaults should recover and not need to be reported to
> > userspace.
> > 
> An unrecoverable pagefault causes a CAT error or a context reset, so it
> would be enough to store a single unrecoverable pagefault to provide its
> details to userspace. What do you think?

That's probably correct, though just to be safe, I'm going to continue to
report multiple pagefaults at a time, since there's no harm in it.
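
For illustration (this is just a rough userspace sketch under my own naming,
not the patch code itself), the bookkeeping amounts to a capped FIFO: new
faults are appended at the tail, and once the list grows past MAX_PFS the
oldest entry at the head is freed, so memory use stays bounded no matter how
many faults are recorded:

#include <stdio.h>
#include <stdlib.h>

#define MAX_PFS 50

/* Stand-in for a saved xe_pagefault; names here are made up. */
struct pf_entry {
	int fault_id;
	struct pf_entry *next;
};

struct pf_list {
	struct pf_entry *head, *tail;
	unsigned int len;
};

static void pf_list_add(struct pf_list *l, int fault_id)
{
	struct pf_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	e->fault_id = fault_id;

	/* Append the newest fault at the tail. */
	if (l->tail)
		l->tail->next = e;
	else
		l->head = e;
	l->tail = e;
	l->len++;

	/* Cap memory use: drop the oldest fault once over the limit. */
	if (l->len > MAX_PFS) {
		struct pf_entry *old = l->head;

		l->head = old->next;
		free(old);
		l->len--;
	}
}

int main(void)
{
	struct pf_list l = { 0 };
	int i;

	for (i = 0; i < 60; i++)
		pf_list_add(&l, i);

	/* 60 faults recorded, only the most recent MAX_PFS retained. */
	printf("retained %u faults, oldest id %d\n", l.len, l.head->fault_id);
	return 0;
}

In the actual patch the same capping happens in xe_vm_add_pf_entry() under
vm->pfs.lock, so the list can never hold more than MAX_PFS entries per VM.
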
-Jonathan Cavitt

> 
> G.G.
> 
> > Signed-off-by: Jonathan Cavitt <jonathan.cavitt at intel.com>
> > Suggested-by: Matthew Brost <matthew.brost at intel.com>
> > ---
> >   drivers/gpu/drm/xe/xe_gt_pagefault.c | 17 +++++++++++
> >   drivers/gpu/drm/xe/xe_vm.c           | 45 ++++++++++++++++++++++++++++
> >   drivers/gpu/drm/xe/xe_vm.h           |  6 ++++
> >   drivers/gpu/drm/xe/xe_vm_types.h     | 20 +++++++++++++
> >   4 files changed, 88 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > index 07b52d3c1a60..84907fb4295e 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > @@ -335,6 +335,22 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
> >   	return full ? -ENOSPC : 0;
> >   }
> >   
> > +static void save_pagefault_to_vm(struct xe_device *xe, struct xe_pagefault *pf)
> > +{
> > +	struct xe_vm *vm;
> > +	struct xe_pagefault *store;
> > +
> > +	vm = asid_to_vm(xe, pf->asid);
> > +	if (IS_ERR(vm))
> > +		return;
> > +
> > +	store = kzalloc(sizeof(*pf), GFP_KERNEL);
> > +	if (!store)
> > +		return;
> > +	memcpy(store, pf, sizeof(*pf));
> > +	xe_vm_add_pf_entry(vm, store);
> > +}
> > +
> >   #define USM_QUEUE_MAX_RUNTIME_MS	20
> >   
> >   static void pf_queue_work_func(struct work_struct *w)
> > @@ -353,6 +369,7 @@ static void pf_queue_work_func(struct work_struct *w)
> >   		ret = handle_pagefault(gt, &pf);
> >   		if (unlikely(ret)) {
> >   			print_pagefault(xe, &pf);
> > +			save_pagefault_to_vm(xe, &pf);
> >   			pf.fault_unsuccessful = 1;
> >   			drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
> >   		}
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 996000f2424e..6211b971bbbd 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -746,6 +746,46 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
> >   		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
> >   }
> >   
> > +static void free_pf_entry(struct xe_vm *vm, struct xe_vm_pf_entry *e)
> > +{
> > +	list_del(&e->list);
> > +	kfree(e->pf);
> > +	kfree(e);
> > +	vm->pfs.len--;
> > +}
> > +
> > +void xe_vm_add_pf_entry(struct xe_vm *vm, struct xe_pagefault *pf)
> > +{
> > +	struct xe_vm_pf_entry *e;
> > +
> > +	e = kzalloc(sizeof(*e), GFP_KERNEL);
> > +	if (!e)
> > +		return;
> > +	e->pf = pf;
> > +
> > +	spin_lock(&vm->pfs.lock);
> > +	list_add_tail(&e->list, &vm->pfs.list);
> > +	vm->pfs.len++;
> > +	/* Limit the number of pfs in the pf list to prevent memory overuse. */
> > +	if (vm->pfs.len > MAX_PFS) {
> > +		struct xe_vm_pf_entry *rem =
> > +			list_first_entry(&vm->pfs.list, struct xe_vm_pf_entry, list);
> > +
> > +		free_pf_entry(vm, rem);
> > +	}
> > +	spin_unlock(&vm->pfs.lock);
> > +}
> > +
> > +void xe_vm_remove_pf_entries(struct xe_vm *vm)
> > +{
> > +	struct xe_vm_pf_entry *e, *tmp;
> > +
> > +	spin_lock(&vm->pfs.lock);
> > +	list_for_each_entry_safe(e, tmp, &vm->pfs.list, list)
> > +		free_pf_entry(vm, e);
> > +	spin_unlock(&vm->pfs.lock);
> > +}
> > +
> >   static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
> >   {
> >   	int i;
> > @@ -1448,6 +1488,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> >   	init_rwsem(&vm->userptr.notifier_lock);
> >   	spin_lock_init(&vm->userptr.invalidated_lock);
> >   
> > +	INIT_LIST_HEAD(&vm->pfs.list);
> > +	spin_lock_init(&vm->pfs.lock);
> > +
> >   	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
> >   
> >   	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
> > @@ -1672,6 +1715,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
> >   	}
> >   	up_write(&xe->usm.lock);
> >   
> > +	xe_vm_remove_pf_entries(vm);
> > +
> >   	for_each_tile(tile, xe, id)
> >   		xe_range_fence_tree_fini(&vm->rftree[id]);
> >   
> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > index f66075f8a6fe..4d94ab5c8ea4 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.h
> > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > @@ -12,6 +12,8 @@
> >   #include "xe_map.h"
> >   #include "xe_vm_types.h"
> >   
> > +#define MAX_PFS 50
> > +
> >   struct drm_device;
> >   struct drm_printer;
> >   struct drm_file;
> > @@ -244,6 +246,10 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
> >   
> >   int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
> >   
> > +void xe_vm_add_pf_entry(struct xe_vm *vm, struct xe_pagefault *pf);
> > +
> > +void xe_vm_remove_pf_entries(struct xe_vm *vm);
> > +
> >   bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
> >   
> >   int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
> > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > index 52467b9b5348..10b0952db56c 100644
> > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > @@ -18,6 +18,7 @@
> >   #include "xe_range_fence.h"
> >   
> >   struct xe_bo;
> > +struct xe_pagefault;
> >   struct xe_sync_entry;
> >   struct xe_user_fence;
> >   struct xe_vm;
> > @@ -135,6 +136,13 @@ struct xe_userptr_vma {
> >   
> >   struct xe_device;
> >   
> > +struct xe_vm_pf_entry {
> > +	/** @pf: observed pagefault */
> > +	struct xe_pagefault *pf;
> > +	/** @list: link into @xe_vm.pfs.list */
> > +	struct list_head list;
> > +};
> > +
> >   struct xe_vm {
> >   	/** @gpuvm: base GPUVM used to track VMAs */
> >   	struct drm_gpuvm gpuvm;
> > @@ -274,6 +282,18 @@ struct xe_vm {
> >   		bool capture_once;
> >   	} error_capture;
> >   
> > +	/**
> > +	 * @pfs: List of all pagefaults associated with this VM
> > +	 */
> > +	struct {
> > +		/** @lock: lock protecting @pfs.list */
> > +		spinlock_t lock;
> > +		/** @list: list of xe_vm_pf_entry entries */
> > +		struct list_head list;
> > +		/** @len: length of @pfs.list */
> > +		unsigned int len;
> > +	} pfs;
> > +
> >   	/**
> >   	 * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
> >   	 * protected by the vm resv.
> 
> 

