[PATCH 1/4] drm/xe/guc: Allocate GuC data structures in system memory for initial load

Rodrigo Vivi rodrigo.vivi at intel.com
Thu Feb 1 21:57:25 UTC 2024


On Thu, Feb 01, 2024 at 09:25:38PM +0100, Michał Winiarski wrote:
> On Thu, Feb 01, 2024 at 05:36:20PM +0000, Matthew Brost wrote:
> > On Mon, Jan 29, 2024 at 02:03:05PM +0100, Michał Winiarski wrote:
> > > GuC load will need to happen at an earlier point in probe, where local
> > > memory is not yet available. Use system memory for GuC data structures
> > > used for initial "hwconfig" load, and realloc at a later,
> > > "post-hwconfig" load if needed, when local memory is available.
> > > 
> > > Signed-off-by: Michał Winiarski <michal.winiarski at intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/xe_bo.c           | 32 ++++++++++++++++++++++++++
> > >  drivers/gpu/drm/xe/xe_bo.h           |  1 +
> > >  drivers/gpu/drm/xe/xe_guc.c          | 34 ++++++++++++++++++++++++++++
> > >  drivers/gpu/drm/xe/xe_guc_ads.c      |  2 +-
> > >  drivers/gpu/drm/xe/xe_guc_ct.c       |  2 +-
> > >  drivers/gpu/drm/xe/xe_guc_hwconfig.c |  2 +-
> > >  drivers/gpu/drm/xe/xe_guc_log.c      |  2 +-
> > >  7 files changed, 71 insertions(+), 4 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> > > index d6a193060cc0b..7df87fbad0938 100644
> > > --- a/drivers/gpu/drm/xe/xe_bo.c
> > > +++ b/drivers/gpu/drm/xe/xe_bo.c
> > > @@ -1605,6 +1605,38 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
> > >  	return bo;
> > >  }
> > >  
> > > +/**
> > > + * xe_managed_bo_reinit_in_vram
> > > + * @xe: xe device
> > > + * @tile: Tile where the new buffer will be created
> > > + * @src: Managed buffer object allocated in system memory
> > > + *
> > > + * Replace a managed src buffer object allocated in system memory with a new
> > > + * one allocated in vram, copying the data between them.
> > > + * Buffer object in VRAM is not going to have the same GGTT address, the caller
> > > + * is responsible for making sure that any old references to it are updated.
> > > + *
> > > + * Returns 0 for success, negative error code otherwise.
> > > + */
> > > +int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
> > > +{
> > > +	struct xe_bo *bo;
> > > +
> > > +	xe_assert(xe, IS_DGFX(xe));
> > > +	xe_assert(xe, !(*src)->vmap.is_iomem);
> > > +
> > > +	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size,
> > > +					    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > > +					    XE_BO_CREATE_GGTT_BIT);
> > > +	if (IS_ERR(bo))
> > > +		return PTR_ERR(bo);
> > > +
> > > +	drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
> > 
> > Should we not destroy / release the *src BO here?
> > 
> > Matt
> 
> That's what __xe_bo_unpin_map_no_vm does. When the BO is
> allocated using xe_managed_bo_create_from_data, the
> __xe_bo_unpin_map_no_vm action is added as a DRM-managed resource to
> automatically destroy the BO. With drmm_release_action, we're calling
> it immediately (and removing the action from the managed-resources
> list - so that it won't be called when the underlying DRM device gets
> released).

This indeed looks correct to me:

Reviewed-by: Rodrigo Vivi <rodrigo.vivi at intel.com>


> 
> Thanks,
> -Michał
> 
> > 
> > > +	*src = bo;
> > > +
> > > +	return 0;
> > > +}
> > > +
> > >  /*
> > >   * XXX: This is in the VM bind data path, likely should calculate this once and
> > >   * store, with a recalculation if the BO is moved.
> > > diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
> > > index db4b2db6b0730..ff919a836d163 100644
> > > --- a/drivers/gpu/drm/xe/xe_bo.h
> > > +++ b/drivers/gpu/drm/xe/xe_bo.h
> > > @@ -129,6 +129,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
> > >  					   size_t size, u32 flags);
> > >  struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
> > >  					     const void *data, size_t size, u32 flags);
> > > +int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
> > >  
> > >  int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
> > >  			      u32 bo_flags);
> > > diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
> > > index fcb8a9efac704..9b9a1252f3090 100644
> > > --- a/drivers/gpu/drm/xe/xe_guc.c
> > > +++ b/drivers/gpu/drm/xe/xe_guc.c
> > > @@ -272,6 +272,34 @@ void xe_guc_comm_init_early(struct xe_guc *guc)
> > >  		guc->notify_reg = GUC_HOST_INTERRUPT;
> > >  }
> > >  
> > > +static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
> > > +{
> > > +	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
> > > +	struct xe_device *xe = guc_to_xe(guc);
> > > +	int ret;
> > > +
> > > +	if (!IS_DGFX(guc_to_xe(guc)))
> > > +		return 0;
> > > +
> > > +	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > > +	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > > +	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > > +	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > > +	return 0;
> > > +}
> > > +
> > >  int xe_guc_init(struct xe_guc *guc)
> > >  {
> > >  	struct xe_device *xe = guc_to_xe(guc);
> > > @@ -331,6 +359,12 @@ int xe_guc_init(struct xe_guc *guc)
> > >   */
> > >  int xe_guc_init_post_hwconfig(struct xe_guc *guc)
> > >  {
> > > +	int ret;
> > > +
> > > +	ret = xe_guc_realloc_post_hwconfig(guc);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > >  	guc_init_params_post_hwconfig(guc);
> > >  
> > >  	return xe_guc_ads_init_post_hwconfig(&guc->ads);
> > > diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
> > > index 390e6f1bf4e1c..6ad4c1a90a787 100644
> > > --- a/drivers/gpu/drm/xe/xe_guc_ads.c
> > > +++ b/drivers/gpu/drm/xe/xe_guc_ads.c
> > > @@ -273,7 +273,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
> > >  	ads->regset_size = calculate_regset_size(gt);
> > >  
> > >  	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
> > > -					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > > +					  XE_BO_CREATE_SYSTEM_BIT |
> > >  					  XE_BO_CREATE_GGTT_BIT);
> > >  	if (IS_ERR(bo))
> > >  		return PTR_ERR(bo);
> > > diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> > > index f3d356383cedf..355edd4d758af 100644
> > > --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> > > +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> > > @@ -155,7 +155,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
> > >  	primelockdep(ct);
> > >  
> > >  	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
> > > -					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > > +					  XE_BO_CREATE_SYSTEM_BIT |
> > >  					  XE_BO_CREATE_GGTT_BIT);
> > >  	if (IS_ERR(bo))
> > >  		return PTR_ERR(bo);
> > > diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
> > > index 2a13a00917f8c..ea49f3885c108 100644
> > > --- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
> > > +++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
> > > @@ -78,7 +78,7 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
> > >  		return -EINVAL;
> > >  
> > >  	bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
> > > -					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > > +					  XE_BO_CREATE_SYSTEM_BIT |
> > >  					  XE_BO_CREATE_GGTT_BIT);
> > >  	if (IS_ERR(bo))
> > >  		return PTR_ERR(bo);
> > > diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
> > > index bcd2f4d34081d..45135c3520e54 100644
> > > --- a/drivers/gpu/drm/xe/xe_guc_log.c
> > > +++ b/drivers/gpu/drm/xe/xe_guc_log.c
> > > @@ -84,7 +84,7 @@ int xe_guc_log_init(struct xe_guc_log *log)
> > >  	struct xe_bo *bo;
> > >  
> > >  	bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
> > > -					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > > +					  XE_BO_CREATE_SYSTEM_BIT |
> > >  					  XE_BO_CREATE_GGTT_BIT);
> > >  	if (IS_ERR(bo))
> > >  		return PTR_ERR(bo);
> > > -- 
> > > 2.43.0
> > > 


More information about the Intel-xe mailing list