[PATCH v5 2/2] drm/xe/uapi: Add L3 bank mask to topology query

Matt Roper matthew.d.roper at intel.com
Wed Mar 27 23:13:11 UTC 2024


On Mon, Mar 25, 2024 at 05:17:41PM -0700, Matt Roper wrote:
> On Mon, Mar 25, 2024 at 11:14:39AM +0000, Francois Dugast wrote:
> > The Metrics Discovery Application Programming Interface (MDAPI) exposes
> > disaggregated metrics only for available instances of hardware, therefore
> > it needs to know exactly which L3 banks are enabled on the device. The L3
> > count is not sufficient information as in some configurations the L3
> > banks might be individually enabled or disabled. This is why the topology
> > query is here extended to provide the L3 bank mask.
> > 
> > v2:
> > - Remove "Fixes" and make uAPI change explicit in commit message
> >   (Lucas De Marchi and Matt Roper)
> > - Add separate conditions for Xe2 and for PVC (Matt Roper)
> > - Return the L3 bank mask instead of the L3 node mask and the L3
> >   banks per node mask, as the node mask can be derived from the
> >   bank mask by user space, just like slice masks are derived from
> >   subslice masks today (Francois Dugast)
> > 
> > v3: Use TOPO_NUMBER_OF_MASKS for the number of masks (Zhanjun Dong)
> > 
> > v4: Detail the user space need in commit message (Lucas De Marchi)
> > 
> > Bspec: 52545, 52546, 62482
> > Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> > Cc: Jonathan Cavitt <jonathan.cavitt at intel.com>
> > Cc: Robert Krzemien <robert.krzemien at intel.com>
> > Cc: Matt Roper <matthew.d.roper at intel.com>
> > Cc: Zhanjun Dong <zhanjun.dong at intel.com>
> > Cc: Lucas De Marchi <lucas.demarchi at intel.com>
> > Link: https://github.com/intel/metrics-discovery
> > ---
> >  drivers/gpu/drm/xe/regs/xe_gt_regs.h |  3 ++
> >  drivers/gpu/drm/xe/xe_gt_topology.c  | 72 ++++++++++++++++++++++++++++
> >  drivers/gpu/drm/xe/xe_gt_types.h     | 14 ++++--
> >  drivers/gpu/drm/xe/xe_query.c        | 12 ++++-
> >  include/uapi/drm/xe_drm.h            |  1 +
> >  5 files changed, 96 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> > index 65af9fe95db5..03388c69af1b 100644
> > --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> > +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> > @@ -156,7 +156,10 @@
> >  #define	MIRROR_FUSE3				XE_REG(0x9118)
> >  #define   XE2_NODE_ENABLE_MASK			REG_GENMASK(31, 16)
> >  #define   L3BANK_PAIR_COUNT			4
> > +#define   XEHPC_GT_L3_MODE_MASK			REG_GENMASK(7, 4)
> > +#define   XE2_GT_L3_MODE_MASK			REG_GENMASK(7, 4)
> >  #define   L3BANK_MASK				REG_GENMASK(3, 0)
> > +#define   XELP_GT_L3_MODE_MASK			REG_GENMASK(7, 0)
> >  /* on Xe_HP the same fuses indicates mslices instead of L3 banks */
> >  #define   MAX_MSLICES				4
> >  #define   MEML3_EN_MASK				REG_GENMASK(3, 0)
> > diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
> > index f5773a14f3c8..8920426332dd 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_topology.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_topology.c
> > @@ -59,6 +59,75 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
> >  	bitmap_from_arr32(mask, &val, XE_MAX_EU_FUSE_BITS);
> >  }
> >  
> > +static void
> > +load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
> > +{
> > +	struct xe_device *xe = gt_to_xe(gt);
> > +	u64 node_mask, bank_mask = 0;
> > +	u64 banks_per_node_fuse, banks_per_node_mask = 0;
> > +	int i;
> > +
> > +	if (GRAPHICS_VER(xe) >= 20) {
> > +		node_mask =
> > +			REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
> > +				      xe_mmio_read32(gt, MIRROR_FUSE3));
> > +		banks_per_node_mask =
> > +			REG_FIELD_GET(XE2_GT_L3_MODE_MASK,
> > +				      xe_mmio_read32(gt, MIRROR_FUSE3));
> > +		for (i = 0; i < fls(node_mask); i++)
> > +			if (node_mask & BIT(i))
> > +				bank_mask |= banks_per_node_mask << 4 * i;
> > +	} else if (GRAPHICS_VERx100(xe) >= 1270) {
> > +		node_mask =
> > +			REG_FIELD_GET(MEML3_EN_MASK,
> > +				      xe_mmio_read32(gt, MIRROR_FUSE3));
> > +		banks_per_node_fuse =
> > +			REG_FIELD_GET(GT_L3_EXC_MASK,
> > +				      xe_mmio_read32(gt, XEHP_FUSE4));
> > +		/* Each bit represents 2 banks in the node. */
> > +		for (i = 0; i < fls(banks_per_node_fuse); i++)
> > +			if (banks_per_node_fuse & BIT(i))
> > +				banks_per_node_mask |= 0x3 << 2 * i;
> > +		for (i = 0; i < fls(node_mask); i++)
> > +			if (node_mask & BIT(i))
> > +				bank_mask |= banks_per_node_mask << 4 * i;
> > +	} else if (xe->info.platform == XE_PVC) {
> > +		node_mask =
> > +			REG_FIELD_GET(MEML3_EN_MASK,
> > +				      xe_mmio_read32(gt, MIRROR_FUSE3));
> > +		banks_per_node_fuse =
> > +			REG_FIELD_GET(XEHPC_GT_L3_MODE_MASK,
> > +				      xe_mmio_read32(gt, MIRROR_FUSE3));
> > +		/* Each bit represents 4 banks in the node. */
> > +		for (i = 0; i < fls(banks_per_node_fuse); i++)
> > +			if (banks_per_node_fuse & BIT(i))
> > +				banks_per_node_mask |= 0xf << 4 * i;
> > +		for (i = 0; i < fls(node_mask); i++)
> > +			if (node_mask & BIT(i))
> > +				bank_mask |= banks_per_node_mask << 16 * i;
> > +	} else if (xe->info.platform == XE_DG2) {
> > +		node_mask =
> > +			REG_FIELD_GET(MEML3_EN_MASK,
> > +				      xe_mmio_read32(gt, MIRROR_FUSE3));
> > +		/*
> > +		 * In this case, if a node is present then all 8 banks inside of
> > +		 * it are present.
> > +		 */
> > +		for (i = 0; i < fls(node_mask); i++)
> > +			if (node_mask & BIT(i))
> > +				bank_mask |= 0xfful << 8 * i;
> > +	} else {
> > +		/*
> > +		 * Here the mask logic is reversed: a bit is set if the bank is
> > +		 * disabled.
> > +		 */
> > +		bank_mask = REG_FIELD_GET(XELP_GT_L3_MODE_MASK,
> > +					  ~xe_mmio_read32(gt, MIRROR_FUSE3));
> > +	}
> > +
> > +	bitmap_from_arr64(l3_bank_mask, &bank_mask, XE_MAX_L3_BANK_FUSE_BITS);
> 
> This is going to cause us a lot of pain down the road as soon as we hit
> a platform where the size of the mask can exceed a single u64.  Then
> we'll have to go back and change all of these if/else branches.
> 
> It might be best to just build the bitmap directly in each branch now
> rather than doing this in a way that we know we'll need to change later.
> We can add some static helper functions to do common stuff like
> replicate a submask according to the bits of a different mask.  Hard to
> explain in text, but something like the following (completely untested):
> 
> 
>   /* For each set bit in @mask, replicate @pattern into a corresponding
>      slot of @dst (where slot size = @maskbits) */
>   static void replicate_by_mask(xe_l3_bank_mask_t dst,
>                                 xe_l3_bank_mask_t pattern,
>                                 u32 mask,
>                                 int maskbits)
>   {
>      unsigned long nextbit;
>   
>      xe_assert(fls(mask) <= maskbits);
>      for_each_set_bit(nextbit, &mask, 32) {
>          xe_l3_bank_mask_t shifted_pattern = {};
> 
>          bitmap_shift_left(shifted_pattern, pattern, nextbit * maskbits, MAXSIZE);
>          bitmap_or(dst, dst, shifted_pattern, MAXSIZE);
>      }
>   }
> 
>   static void
>   load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) {
>         ...
>         fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
> 
>         ...
>         } else if (xe->info.platform == XE_PVC) {
>                 xe_l3_bank_mask_t per_node = {};
>                 u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
>                 u32 bank_val = REG_FIELD_GET(XEHPC_GT_L3_MODE_MASK, fuse3);
> 
>                 bitmap_from_arr32(per_node, &bank_val, 32);
>                 replicate_by_mask(l3_bank_mask, per_node, meml3_en, 4);
>         } else if (xe->info.platform == XE_DG2) {
>                 xe_l3_bank_mask_t bit0 = {};
>                 u32 mask = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
> 
>                 bitmap_fill(bit0, 8);
>                 replicate_by_mask(l3_bank_mask, bit0, mask, 8);
>         } else {
>                 /* 1:1 register bit to mask bit (inverted register bits) */
>                 u32 mask = REG_FIELD_GET(XELP_GT_L3_MODE_MASK, ~fuse3);
>                 bitmap_from_arr32(l3_bank_mask, &mask, 32);
>         }
>   }
> 
> 
> > +}
> > +
> >  static void
> >  get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
> >  {
> > @@ -103,6 +172,7 @@ xe_gt_topology_init(struct xe_gt *gt)
> >  		      XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
> >  		      XE2_GT_COMPUTE_DSS_2);
> >  	load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
> > +	load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
> >  
> >  	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
> >  
> > @@ -120,6 +190,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
> >  	drm_printf(p, "EU mask per DSS:     %*pb\n", XE_MAX_EU_FUSE_BITS,
> >  		   gt->fuse_topo.eu_mask_per_dss);
> >  
> > +	drm_printf(p, "L3 bank mask:        %*pb\n", XE_MAX_L3_BANK_FUSE_BITS,
> > +		   gt->fuse_topo.l3_bank_mask);
> >  }
> >  
> >  /*
> > diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
> > index f6da2ad9719f..e1438a478f94 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_types.h
> > +++ b/drivers/gpu/drm/xe/xe_gt_types.h
> > @@ -24,13 +24,16 @@ enum xe_gt_type {
> >  	XE_GT_TYPE_MEDIA,
> >  };
> >  
> > -#define XE_MAX_DSS_FUSE_REGS	3
> > -#define XE_MAX_DSS_FUSE_BITS	(32 * XE_MAX_DSS_FUSE_REGS)
> > -#define XE_MAX_EU_FUSE_REGS	1
> > -#define XE_MAX_EU_FUSE_BITS	(32 * XE_MAX_EU_FUSE_REGS)
> > +#define XE_MAX_DSS_FUSE_REGS		3
> > +#define XE_MAX_DSS_FUSE_BITS		(32 * XE_MAX_DSS_FUSE_REGS)
> > +#define XE_MAX_EU_FUSE_REGS		1
> > +#define XE_MAX_EU_FUSE_BITS		(32 * XE_MAX_EU_FUSE_REGS)
> > +#define XE_MAX_L3_BANK_FUSE_REGS	2
> > +#define XE_MAX_L3_BANK_FUSE_BITS	(32 * XE_MAX_L3_BANK_FUSE_REGS)
> 
> This definition doesn't look right.  For DSS the hardware just splits
> the actual mask (1 bit per DSS) across several 32-bit registers so
> 32*numregs expresses the maximum possible size of what the hardware can
> provide.  But for L3 banks the rules are much more complicated and vary
> by platform, so we can't use the same rules.
> 
> 
> Matt
> 
> >  
> >  typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(XE_MAX_DSS_FUSE_BITS)];
> >  typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(XE_MAX_EU_FUSE_BITS)];
> > +typedef unsigned long xe_l3_bank_mask_t[BITS_TO_LONGS(XE_MAX_L3_BANK_FUSE_BITS)];
> >  
> >  struct xe_mmio_range {
> >  	u32 start;
> > @@ -334,6 +337,9 @@ struct xe_gt {
> >  
> >  		/** @fuse_topo.eu_mask_per_dss: EU mask per DSS*/
> >  		xe_eu_mask_t eu_mask_per_dss;
> > +
> > +		/** @fuse_topo.l3_bank_mask: L3 bank mask */
> > +		xe_l3_bank_mask_t l3_bank_mask;
> >  	} fuse_topo;
> >  
> >  	/** @steering: register steering for individual HW units */
> > diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
> > index 109a96212310..e026d993f246 100644
> > --- a/drivers/gpu/drm/xe/xe_query.c
> > +++ b/drivers/gpu/drm/xe/xe_query.c

I also think we should split this patch a bit more too.  The general
readout and storing of L3 bank masks internally is something that's
generally useful to the driver even without the uapi part so we should
add those in separate patches.  I.e., even if the userspace consumer
never materializes and we drop the uapi stuff, we can still utilize the
internal mask values for our MCR initialization (removing some of the
duplicate code that exists over there now) and it would also be valuable
to make these masks available through debugfs and/or dmesg so that
developers can easily figure out the fusing characteristics of machines
that they're trying to debug issues on.


Matt

> > @@ -23,7 +23,7 @@
> >  #include "xe_mmio.h"
> >  #include "xe_ttm_vram_mgr.h"
> >  
> > -#define TOPO_NUMBER_OF_MASKS   (3)
> > +#define TOPO_NUMBER_OF_MASKS   (4)
> >  
> >  static const u16 xe_to_user_engine_class[] = {
> >  	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
> > @@ -460,7 +460,8 @@ static size_t calc_topo_query_size(struct xe_device *xe)
> >  		 * sizeof(struct drm_xe_query_topology_mask) +
> >  		 sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
> >  		 sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
> > -		 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
> > +		 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss) +
> > +		 sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask));
> >  }
> >  
> >  static int copy_mask(void __user **ptr,
> > @@ -519,6 +520,13 @@ static int query_gt_topology(struct xe_device *xe,
> >  				sizeof(gt->fuse_topo.eu_mask_per_dss));
> >  		if (err)
> >  			return err;
> > +
> > +		topo.type = DRM_XE_TOPO_L3_BANK;
> > +		err = copy_mask(&query_ptr, &topo,
> > +				gt->fuse_topo.l3_bank_mask,
> > +				sizeof(gt->fuse_topo.l3_bank_mask));
> > +		if (err)
> > +			return err;
> >  	}
> >  
> >  	return 0;
> > diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> > index 808ad1c308ec..aa917308cdeb 100644
> > --- a/include/uapi/drm/xe_drm.h
> > +++ b/include/uapi/drm/xe_drm.h
> > @@ -521,6 +521,7 @@ struct drm_xe_query_topology_mask {
> >  #define DRM_XE_TOPO_DSS_GEOMETRY	(1 << 0)
> >  #define DRM_XE_TOPO_DSS_COMPUTE		(1 << 1)
> >  #define DRM_XE_TOPO_EU_PER_DSS		(1 << 2)
> > +#define DRM_XE_TOPO_L3_BANK		(1 << 3)
> >  	/** @type: type of mask */
> >  	__u16 type;
> >  
> > -- 
> > 2.34.1
> > 
> 
> -- 
> Matt Roper
> Graphics Software Engineer
> Linux GPU Platform Enablement
> Intel Corporation

-- 
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation


More information about the Intel-xe mailing list