[PATCH 2/2] drm/xe: Use topology to determine page fault queue size

Summers, Stuart stuart.summers at intel.com
Tue Jun 25 17:38:09 UTC 2024


On Mon, 2024-06-10 at 22:32 +0000, Matthew Brost wrote:
> On Mon, Jun 10, 2024 at 08:33:00PM +0000, Stuart Summers wrote:
> > Currently the page fault queue size is hard coded. However
> > the hardware supports faulting for each EU and each CS.
> > For some applications running on hardware with a large
> > number of EUs and CSs, this can result in an overflow of
> > the page fault queue.
> > 
> > Add a small calculation to determine the page fault queue
> > size based on the number of EUs and CSs in the platform as
> > determined by fuses.
> > 
> 
> This math will also be needed when determining the G2H queue size as
> page faults get sunk to that queue first before being copied to this
> secondary queue. 

Ok, let me look.
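
Just to sketch what I have in mind (the helper and call site here are
hypothetical -- the real G2H sizing lives in xe_guc_ct.c and doesn't
take a per-GT parameter today), the same topology math bounds the
worst-case burst the G2H queue has to absorb before the handler copies
each fault into pf_queue:

/*
 * Hypothetical sketch only: worst-case fault burst in DWs, using the
 * same fuse-based math as xe_alloc_pf_queue() below. Something like
 * this could feed the G2H buffer sizing.
 */
static u32 pf_worst_case_g2h_dw(struct xe_gt *gt)
{
        xe_dss_mask_t all_dss;
        u32 num_dss, num_eus;

        bitmap_or(all_dss, gt->fuse_topo.g_dss_mask,
                  gt->fuse_topo.c_dss_mask, XE_MAX_DSS_FUSE_BITS);

        num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
        num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
                                XE_MAX_EU_FUSE_BITS) * num_dss;

        /* one outstanding fault per EU and per CS, PF_MSG_LEN_DW each */
        return (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
}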

> 
> > Signed-off-by: Stuart Summers <stuart.summers at intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_gt_pagefault.c | 49 +++++++++++++++++++++-------
> >  drivers/gpu/drm/xe/xe_gt_types.h     |  4 +--
> >  2 files changed, 39 insertions(+), 14 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > index 3858c8e0b707..5565732e79e9 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > @@ -287,7 +287,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
> >                         PFD_VIRTUAL_ADDR_LO_SHIFT;
> >  
> >                 pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
> > -                       PF_QUEUE_NUM_DW;
> > +                       pf_queue->pf_queue_num_dw;
> >                 ret = true;
> >         }
> >         spin_unlock_irq(&pf_queue->lock);
> > @@ -299,7 +299,8 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
> >  {
> >         lockdep_assert_held(&pf_queue->lock);
> >  
> > -       return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
> > +       return CIRC_SPACE(pf_queue->head, pf_queue->tail,
> > +                         pf_queue->pf_queue_num_dw) <=
> 
> I don't think the <= PF_MSG_LEN_DW is needed.

I thought the idea here was to make sure the maximum space a single
message might need (PF_MSG_LEN_DW) is still available in the queue
(the CIRC_SPACE check). Isn't that exactly what we want to check?

> 
> >                 PF_MSG_LEN_DW;
> >  }
> >  
> > @@ -312,11 +313,6 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
> >         u32 asid;
> >         bool full;
> >  
> > -       /*
> > -        * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
> > -        */
> > -       BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
> > -
> 
> So here I'd add:
> xe_gt_assert(gt, !(pf_queue->pf_queue_num_dw % PF_MSG_LEN_DW));

Makes sense.
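
i.e. roughly this, right after the pf_queue lookup in
xe_guc_pagefault_handler() (sketch against this patch, untested):

        asid = FIELD_GET(PFD_ASID, msg[1]);
        pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);

        /*
         * Runtime replacement for the old BUILD_BUG_ON(): the queue
         * size is now computed per GT, so check per queue that it is
         * still a whole number of PF_MSG_LEN_DW messages.
         */
        xe_gt_assert(gt, !(pf_queue->pf_queue_num_dw % PF_MSG_LEN_DW));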

> 
> >         if (unlikely(len != PF_MSG_LEN_DW))
> >                 return -EPROTO;
> >  
> > @@ -327,7 +323,8 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
> >         full = pf_queue_full(pf_queue);
> >         if (!full) {
> >                 memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
> > -               pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
> > +               pf_queue->head = (pf_queue->head + len) %
> > +                       pf_queue->pf_queue_num_dw;
> >                 queue_work(gt->usm.pf_wq, &pf_queue->worker);
> >         } else {
> >                 drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
> > @@ -382,18 +379,42 @@ static void pf_queue_work_func(struct work_struct *w)
> >  
> >  static void acc_queue_work_func(struct work_struct *w);
> >  
> > +static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
> > +{
> > +       xe_dss_mask_t all_dss;
> > +       int num_dss, num_eus;
> > +
> > +       bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
> > +                 XE_MAX_DSS_FUSE_BITS);
> > +
> > +       num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
> > +       num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
> > +                               XE_MAX_EU_FUSE_BITS) * num_dss;
> > +
> > +       /* user can issue separate page faults per EU and per CS */
> > +       pf_queue->pf_queue_num_dw =
> > +               (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
> > +
> > +       pf_queue->gt = gt;
> > +       pf_queue->data = kcalloc(pf_queue->pf_queue_num_dw, sizeof(u32),
> > +                                GFP_KERNEL);
> > +       if (!pf_queue->data)
> > +               return -ENOMEM;
> > +       spin_lock_init(&pf_queue->lock);
> > +       INIT_WORK(&pf_queue->worker, pf_queue_work_func);
> > +
> > +       return 0;
> > +}
> > +
> >  int xe_gt_pagefault_init(struct xe_gt *gt)
> >  {
> >         struct xe_device *xe = gt_to_xe(gt);
> > -       int i;
> > +       int i, ret = 0;
> >  
> >         if (!xe->info.has_usm)
> >                 return 0;
> >  
> >         for (i = 0; i < NUM_PF_QUEUE; ++i) {
> > -               gt->usm.pf_queue[i].gt = gt;
> > -               spin_lock_init(&gt->usm.pf_queue[i].lock);
> > -               INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
> > +               ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
> > +               if (ret)
> > +                       return ret;
> >         }
> >         for (i = 0; i < NUM_ACC_QUEUE; ++i) {
> >                 gt->usm.acc_queue[i].gt = gt;
> > @@ -418,12 +439,16 @@ int xe_gt_pagefault_init(struct xe_gt *gt)
> >  void xe_gt_pagefault_fini(struct xe_gt *gt)
> >  {
> >         struct xe_device *xe = gt_to_xe(gt);
> > +       int i;
> >  
> >         if (!xe->info.has_usm)
> >                 return;
> >  
> >         destroy_workqueue(gt->usm.acc_wq);
> >         destroy_workqueue(gt->usm.pf_wq);
> > +
> > +       for (i = 0; i < NUM_PF_QUEUE; ++i)
> > +               kfree(gt->usm.pf_queue[i].data);
> >  }
> >  
> >  void xe_gt_pagefault_reset(struct xe_gt *gt)
> > diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
> > index 10a9a9529377..c40b9c1b5fec 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_types.h
> > +++ b/drivers/gpu/drm/xe/xe_gt_types.h
> > @@ -232,9 +232,9 @@ struct xe_gt {
> >                 struct pf_queue {
> >                         /** @usm.pf_queue.gt: back pointer to GT */
> >                         struct xe_gt *gt;
> > -#define PF_QUEUE_NUM_DW        128
> >                         /** @usm.pf_queue.data: data in the page fault queue */
> > -                       u32 data[PF_QUEUE_NUM_DW];
> > +                       u32 *data;
> 
> Kernel doc.

Ok.
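
Will add something along these lines in the next rev (wording just a
sketch):

                        /** @usm.pf_queue.data: data in the page fault queue */
                        u32 *data;
                        /**
                         * @usm.pf_queue.pf_queue_num_dw: size in DWs of the
                         * page fault queue, computed from the fused topology
                         * at init
                         */
                        u32 pf_queue_num_dw;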

Thanks,
Stuart

> 
> > +                       u32 pf_queue_num_dw;
> >                         /**
> >                          * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
> >                          * moved by worker which processes faults (consumer).
> > -- 
> > 2.34.1
> > 


