[PATCH 08/12] drm/xe: Introduce xe_ggtt_largest_hole

Rodrigo Vivi rodrigo.vivi at intel.com
Fri Aug 9 21:33:38 UTC 2024


On Thu, Jul 11, 2024 at 10:00:51PM +0200, Michal Wajdeczko wrote:
> 
> 
> On 11.07.2024 19:11, Rodrigo Vivi wrote:
> > Introduce a new xe_ggtt_largest_hole helper that serves the SRIOV
> > demand and continues the effort of limiting drm_mm access to xe_ggtt.
> > 
> > Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
> > Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_ggtt.c               | 35 ++++++++++++++++++++++
> >  drivers/gpu/drm/xe/xe_ggtt.h               |  1 +
> >  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 23 ++------------
> >  3 files changed, 38 insertions(+), 21 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
> > index 67337bfeb81e..dbaf1ce87fb4 100644
> > --- a/drivers/gpu/drm/xe/xe_ggtt.c
> > +++ b/drivers/gpu/drm/xe/xe_ggtt.c
> > @@ -584,6 +584,41 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
> >  			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
> >  }
> >  
> > +/**
> > + * xe_ggtt_largest_hole - Largest GGTT hole
> > + * @ggtt: the &xe_ggtt that will be inspected
> > + * @alignment: mininum alignment
> 
> typo: minimum

thanks, addressed this...

> 
> > + * @spare: If not NULL: in: desired memory size to be spared / out: Adjusted possible spare
> > + *
> > + * Return: size of the largest contiguous GGTT region
> > + */
> > +u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
> > +{
> > +	const struct drm_mm *mm = &ggtt->mm;
> > +	const struct drm_mm_node *entry;
> > +	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
> 
> this is likely not needed any more as xe_ggtt is always above WOPCM
> (unlike in the previous driver ;)

hmmm... probably... but can we leave this to a follow-up after we
get this series merged?
I'm trying not to modify the current behavior more than this series
already does...

> 
> > +	u64 hole_start, hole_end, hole_size;
> > +	u64 max_hole = 0;
> > +
> > +	mutex_lock(&ggtt->lock);
> > +
> > +	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
> > +		hole_start = max(hole_start, hole_min_start);
> > +		hole_start = ALIGN(hole_start, alignment);
> > +		hole_end = ALIGN_DOWN(hole_end, alignment);
> > +		if (hole_start >= hole_end)
> > +			continue;
> > +		hole_size = hole_end - hole_start;
> > +		if (spare)
> > +			*spare -= min3(*spare, hole_size, max_hole);
> > +		max_hole = max(max_hole, hole_size);
> > +	}
> > +
> > +	mutex_unlock(&ggtt->lock);
> > +
> > +	return max_hole;
> > +}
> > +
> >  #ifdef CONFIG_PCI_IOV
> >  static u64 xe_encode_vfid_pte(u16 vfid)
> >  {
> > diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
> > index f816b3c0732b..31060fe7644b 100644
> > --- a/drivers/gpu/drm/xe/xe_ggtt.h
> > +++ b/drivers/gpu/drm/xe/xe_ggtt.h
> > @@ -29,6 +29,7 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
> >  int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
> >  			 u64 start, u64 end);
> >  void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
> > +u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
> >  
> >  int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
> >  
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> > index efaf188290ea..1d17c34fe5a4 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> > @@ -590,30 +590,11 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
> >  static u64 pf_get_max_ggtt(struct xe_gt *gt)
> >  {
> >  	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
> > -	const struct drm_mm *mm = &ggtt->mm;
> > -	const struct drm_mm_node *entry;
> >  	u64 alignment = pf_get_ggtt_alignment(gt);
> >  	u64 spare = pf_get_spare_ggtt(gt);
> > -	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
> > -	u64 hole_start, hole_end, hole_size;
> > -	u64 max_hole = 0;
> > -
> > -	mutex_lock(&ggtt->lock);
> > -
> > -	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
> > -		hole_start = max(hole_start, hole_min_start);
> > -		hole_start = ALIGN(hole_start, alignment);
> > -		hole_end = ALIGN_DOWN(hole_end, alignment);
> > -		if (hole_start >= hole_end)
> > -			continue;
> > -		hole_size = hole_end - hole_start;
> > -		xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
> > -					hole_start, hole_size / SZ_1K);
> > -		spare -= min3(spare, hole_size, max_hole);
> > -		max_hole = max(max_hole, hole_size);
> > -	}
> > +	u64 max_hole;
> >  
> > -	mutex_unlock(&ggtt->lock);
> > +	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
> >  
> >  	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
> >  				max_hole / SZ_1K, spare / SZ_1K);
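
For the record, a quick worked example of the spare accounting in the new
helper (hypothetical numbers, alignment already satisfied, holes visited in
that order): with holes of 2M and 8M and a requested spare of 1M, the first
iteration sees max_hole == 0, so min3(1M, 2M, 0) == 0 and *spare stays 1M
while max_hole becomes 2M; the second iteration subtracts
min3(1M, 8M, 2M) == 1M, leaving *spare == 0 and max_hole == 8M. So the
helper returns the 8M hole, and the adjusted *spare tells the caller how
much of the requested spare still has to be carved out of that largest hole
(here: nothing, since the 2M hole can host it).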

