[PATCH 02/11] drm/xe: Implement xe_pagefault_init
Matthew Brost
matthew.brost at intel.com
Wed Aug 6 23:59:18 UTC 2025
On Wed, Aug 06, 2025 at 05:08:18PM -0600, Summers, Stuart wrote:
> On Tue, 2025-08-05 at 23:22 -0700, Matthew Brost wrote:
> > Create pagefault queues and initialize them.
> >
> > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_device.c | 5 ++
> > drivers/gpu/drm/xe/xe_device_types.h | 6 ++
> > drivers/gpu/drm/xe/xe_pagefault.c | 93 +++++++++++++++++++++++++++-
> > 3 files changed, 102 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index 57edbc63da6f..c7c8aee03841 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -50,6 +50,7 @@
> > #include "xe_nvm.h"
> > #include "xe_oa.h"
> > #include "xe_observation.h"
> > +#include "xe_pagefault.h"
> > #include "xe_pat.h"
> > #include "xe_pcode.h"
> > #include "xe_pm.h"
> > @@ -890,6 +891,10 @@ int xe_device_probe(struct xe_device *xe)
> > if (err)
> > return err;
> >
> > + err = xe_pagefault_init(xe);
> > + if (err)
> > + return err;
> > +
> > xe_nvm_init(xe);
> >
> > err = xe_heci_gsc_init(xe);
> > diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> > index 01e8fa0d2f9f..6aa119026ce9 100644
> > --- a/drivers/gpu/drm/xe/xe_device_types.h
> > +++ b/drivers/gpu/drm/xe/xe_device_types.h
> > @@ -17,6 +17,7 @@
> > #include "xe_lmtt_types.h"
> > #include "xe_memirq_types.h"
> > #include "xe_oa_types.h"
> > +#include "xe_pagefault_types.h"
> > #include "xe_platform_types.h"
> > #include "xe_pmu_types.h"
> > #include "xe_pt_types.h"
> > @@ -394,6 +395,11 @@ struct xe_device {
> > u32 next_asid;
> > /** @usm.lock: protects UM state */
> > struct rw_semaphore lock;
> > + /** @usm.pf_wq: page fault work queue, unbound, high priority */
> > + struct workqueue_struct *pf_wq;
> > +#define XE_PAGEFAULT_QUEUE_COUNT 4
> > + /** @usm.pf_queue: Page fault queues */
> > + struct xe_pagefault_queue pf_queue[XE_PAGEFAULT_QUEUE_COUNT];
> > } usm;
> >
> > /** @pinned: pinned BO state */
> > diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
> > index 3ce0e8d74b9d..14304c41eb23 100644
> > --- a/drivers/gpu/drm/xe/xe_pagefault.c
> > +++ b/drivers/gpu/drm/xe/xe_pagefault.c
> > @@ -3,6 +3,10 @@
> > * Copyright © 2025 Intel Corporation
> > */
> >
> > +#include <drm/drm_managed.h>
> > +
> > +#include "xe_device.h"
> > +#include "xe_gt_types.h"
> > #include "xe_pagefault.h"
> > #include "xe_pagefault_types.h"
> >
> > @@ -19,6 +23,71 @@
> > * with a single shared consumer.
> > */
> >
> > +static int xe_pagefault_entry_size(void)
> > +{
> > + return roundup_pow_of_two(sizeof(struct xe_pagefault));
>
> Nice thanks!
>
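One benefit of rounding the entry size up to a power of two (with the
queue size below also being a power of two) is that offsets into the raw
buffer can wrap with a simple mask instead of a modulo. Rough sketch
only - the 'tail' field and push helper below are hypothetical, locking
and full-queue handling omitted:

	static void xe_pagefault_queue_push(struct xe_pagefault_queue *pf_queue,
					    const struct xe_pagefault *pf)
	{
		/* Copy the fault in, then advance by the padded entry size */
		memcpy(pf_queue->data + pf_queue->tail, pf, sizeof(*pf));
		pf_queue->tail = (pf_queue->tail + xe_pagefault_entry_size()) &
				 (pf_queue->size - 1);
	}
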
> > +}
> > +
> > +static void xe_pagefault_queue_work(struct work_struct *w)
> > +{
> > + /* TODO: Implement */
> > +}
> > +
> > +static int xe_pagefault_queue_init(struct xe_device *xe,
> > + struct xe_pagefault_queue *pf_queue)
> > +{
> > + struct xe_gt *gt;
> > + int total_num_eus = 0;
> > + u8 id;
> > +
> > + for_each_gt(gt, xe, id) {
> > + xe_dss_mask_t all_dss;
> > + int num_dss, num_eus;
> > +
> > + bitmap_or(all_dss, gt->fuse_topo.g_dss_mask,
> > + gt->fuse_topo.c_dss_mask, XE_MAX_DSS_FUSE_BITS);
> > +
> > + num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
> > + num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
> > + XE_MAX_EU_FUSE_BITS) * num_dss;
> > +
> > + total_num_eus += num_eus;
>
> I'm behind on that patch I had posted a while back to update this
> algorithm :(. Want to pull that calculation in here directly so we can
> remove the PF_MULTIPLIER you have below?
>
> See https://patchwork.freedesktop.org/patch/651415/?series=148523&rev=1
>
> I can also rework that on top of this if you'd prefer, either way is
> fine with me.
>
Either way works for me. If you get yours in ahead of me, no issue
picking it up in a rebase, or of course if this lands first you can post
on top of it. If you are concerned about history, then the latter might
be better.
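
For a rough sense of scale with the current math (numbers purely
illustrative): ignoring the XE_NUM_HW_ENGINES term, ~1024 EUs with a
64-byte rounded entry gives 1024 * 64 * 8 = 512 KiB per queue, already a
power of two, so the four queues together would be ~2 MiB of system
memory. Swapping in your calculation should only change how
total_num_eus (and hopefully the multiplier) is derived - the rest of
xe_pagefault_queue_init stays the same.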
Matt
> Thanks,
> Stuart
>
> > + }
> > +
> > + xe_assert(xe, total_num_eus);
> > +
> > + /*
> > + * user can issue separate page faults per EU and per CS
> > + *
> > + * XXX: Multiplier required as compute UMDs are getting PF queue errors
> > + * without it. Follow up on why this multiplier is required.
> > + */
> > +#define PF_MULTIPLIER 8
> > + pf_queue->size = (total_num_eus + XE_NUM_HW_ENGINES) *
> > + xe_pagefault_entry_size() * PF_MULTIPLIER;
> > + pf_queue->size = roundup_pow_of_two(pf_queue->size);
> > +#undef PF_MULTIPLIER
> > +
> > + drm_dbg(&xe->drm, "xe_pagefault_entry_size=%d, total_num_eus=%d, pf_queue->size=%u",
> > + xe_pagefault_entry_size(), total_num_eus, pf_queue->size);
> > +
> > + pf_queue->data = devm_kzalloc(xe->drm.dev, pf_queue->size, GFP_KERNEL);
> > + if (!pf_queue->data)
> > + return -ENOMEM;
> > +
> > + spin_lock_init(&pf_queue->lock);
> > + INIT_WORK(&pf_queue->worker, xe_pagefault_queue_work);
> > +
> > + return 0;
> > +}
> > +
> > +static void xe_pagefault_fini(void *arg)
> > +{
> > + struct xe_device *xe = arg;
> > +
> > + destroy_workqueue(xe->usm.pf_wq);
> > +}
> > +
> > /**
> > * xe_pagefault_init() - Page fault init
> > * @xe: xe device instance
> > @@ -29,8 +98,28 @@
> > */
> > int xe_pagefault_init(struct xe_device *xe)
> > {
> > - /* TODO - implement */
> > - return 0;
> > + int err, i;
> > +
> > + if (!xe->info.has_usm)
> > + return 0;
> > +
> > + xe->usm.pf_wq = alloc_workqueue("xe_page_fault_work_queue",
> > + WQ_UNBOUND | WQ_HIGHPRI,
> > + XE_PAGEFAULT_QUEUE_COUNT);
> > + if (!xe->usm.pf_wq)
> > + return -ENOMEM;
> > +
> > + for (i = 0; i < XE_PAGEFAULT_QUEUE_COUNT; ++i) {
> > + err = xe_pagefault_queue_init(xe, xe->usm.pf_queue + i);
> > + if (err)
> > + goto err_out;
> > + }
> > +
> > + return devm_add_action_or_reset(xe->drm.dev, xe_pagefault_fini, xe);
> > +
> > +err_out:
> > + destroy_workqueue(xe->usm.pf_wq);
> > + return err;
> > }
> >
> > /**
>