[RFC v1 1/9] drm/xe/hw_engine_group: Introduce xe_hw_engine_group

Francois Dugast francois.dugast at intel.com
Mon Jul 22 07:40:34 UTC 2024


On Wed, Jul 17, 2024 at 07:29:07PM +0000, Matthew Brost wrote:
> On Wed, Jul 17, 2024 at 03:07:22PM +0200, Francois Dugast wrote:
> > An xe_hw_engine_group is a group of hw engines. Two hw engines belong
> > to the same xe_hw_engine_group if one hw engine cannot make progress
> > while the other is stuck on a page fault.
> > 
> > Typically, hw engines of the same group share some resources such as
> > EUs. This depends on the hardware configuration of the platform.
> > 
> > Currently all engines share EUs, so for now only one group is created
> > and assigned to all hw engines.
> > 
> > Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_hw_engine.c       | 48 +++++++++++++++++++++++++
> >  drivers/gpu/drm/xe/xe_hw_engine_types.h | 31 ++++++++++++++++
> >  2 files changed, 79 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
> > index 78b50d3a6501..f8df85d25617 100644
> > --- a/drivers/gpu/drm/xe/xe_hw_engine.c
> > +++ b/drivers/gpu/drm/xe/xe_hw_engine.c
> > @@ -431,6 +431,53 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
> >  	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
> >  }
> >  
> > +static struct xe_hw_engine_group *
> > +hw_engine_group_alloc(struct xe_device *xe)
> > +{
> > +	struct xe_hw_engine_group *group;
> > +
> > +	group = kzalloc(sizeof(*group), GFP_KERNEL);
> 
> Need to handle kzalloc failing here, e.g.
> 
> if (!group)
> 	return ERR_PTR(-ENOMEM);
> 
> > +	init_rwsem(&group->mode_sem);
> > +	INIT_LIST_HEAD(&group->exec_queue_list);
> > +
> > +	return group;
> > +}
> > +
> > +static void
> 
> This can fail, so:
> 
> static int
> 
> > +hw_engine_setup_groups(struct xe_gt *gt)
> > +{
> > +	struct xe_hw_engine *hwe;
> > +	enum xe_hw_engine_id id;
> > +	struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
> > +	struct xe_device *xe = gt_to_xe(gt);
> > +
> > +	/*
> > +	 * Current partitioning implies all engines share EUs therefore
> > +	 * belong to the same group, so create this group and assign it
> > +	 * to all engines.
> > +	 */
> > +	group_rcs_ccs = hw_engine_group_alloc(xe);
> 
> if (IS_ERR(group_rcs_ccs))
> 	return PTR_ERR(group_rcs_ccs);
> 
> Also you will need to clean up these allocations on failures and on
> driver unload. For driver unload cleanup use drmm_add_action_or_reset; I
> suggest adding that below.
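> 
> e.g. for the second allocation, something like this (untested):
> 
> 	group_bcs = hw_engine_group_alloc(xe);
> 	if (IS_ERR(group_bcs)) {
> 		kfree(group_rcs_ccs);
> 		return PTR_ERR(group_bcs);
> 	}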
> 
> > +	group_bcs = hw_engine_group_alloc(xe);
> > +	group_vcs_vecs = hw_engine_group_alloc(xe);
> > +	for_each_hw_engine(hwe, gt, id) {
> > +		switch (hwe->class) {
> > +		case XE_ENGINE_CLASS_COPY:
> > +			hwe->hw_engine_group = group_bcs;
> > +			break;
> > +		case XE_ENGINE_CLASS_RENDER:
> > +		case XE_ENGINE_CLASS_COMPUTE:
> > +			hwe->hw_engine_group = group_rcs_ccs;
> > +			break;
> > +		case XE_ENGINE_CLASS_VIDEO_DECODE:
> > +		case XE_ENGINE_CLASS_VIDEO_ENHANCE:
> > +			hwe->hw_engine_group = group_vcs_vecs;
> > +			break;
> > +		default:
> > +			drm_warn(&xe->drm, "hw engine class not handled");
> 
> I don't think this should be a warn: the GSC (XE_ENGINE_CLASS_OTHER)
> doesn't need a group since it is not exposed to user space, but I'm
> pretty sure it shows up in the for_each_hw_engine loop.
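> 
> Something like this (untested) would cover it instead:
> 
> 		case XE_ENGINE_CLASS_OTHER:
> 			/* GSC is not exposed to user space, no group needed */
> 			break;
> 		default:
> 			drm_warn(&xe->drm, "hw engine class not handled");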
> 
> > +		}
> > +	}
> > +}
> > +
> >  static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
> >  				 enum xe_hw_engine_id id)
> >  {
> > @@ -761,6 +808,7 @@ int xe_hw_engines_init(struct xe_gt *gt)
> >  	}
> >  
> >  	hw_engine_setup_logical_mapping(gt);
> > +	hw_engine_setup_groups(gt);
> 
> err = hw_engine_setup_groups(gt);
> if (err)
> 	return err;
> 
> >  
> >  	return 0;
> 
> return drmm_add_action_or_reset(..., function_to_kfree_groups);
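> 
> A callback along these lines would do (untested; the name is just a
> placeholder):
> 
> static void hw_engine_group_free(struct drm_device *drm, void *arg)
> {
> 	kfree(arg);
> }
> 
> with one drmm_add_action_or_reset() call per group, e.g.:
> 
> 	err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free,
> 				       group_rcs_ccs);
> 	if (err)
> 		return err;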
> 
> >  }
> > diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
> > index 70e6434f150d..c6f7bbcb2a41 100644
> > --- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
> > +++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
> > @@ -100,6 +100,35 @@ struct xe_hw_engine_class_intf {
> >  	} sched_props, defaults;
> >  };
> >  
> > +/** enum xe_hw_engine_group_execution_mode - possible execution modes of a hw engine group */
> > +enum xe_hw_engine_group_execution_mode {
> > +	EXEC_MODE_LR,
> > +	EXEC_MODE_DMA_FENCE,
> > +};
> > +
> > +/**
> > + * struct xe_hw_engine_group - Hardware engine group
> > + *
> > + * hw engines belong to the same group if they share hardware resources in
> > + * a way that prevents them from making progress when one is stuck on a
> > + * page fault.
> > + */
> > +struct xe_hw_engine_group {
> > +	/** @exec_queue_list: list of exec queues attached to this xe_hw_engine_group */
> > +	struct list_head exec_queue_list;
> > +	/** @resume_work: worker to resume LR exec queues */
> > +	struct work_struct resume_work;
> > +	/** @resume_wq: workqueue to resume LR exec queues */
> > +	struct workqueue_struct *resume_wq;
> 
> Assume these two members (resume_*) are set up later in the series?

Yes they are, in "drm/xe/hw_engine_group: Resume LR exec queues suspended by dma fence jobs".

Francois

> 
> > +	/** @mode_sem: used to protect this group's hardware resources and
> > +	 * ensure mutual exclusion between execution only in LR mode
> > +	 * and execution only in DMA_FENCE mode
> > +	 */
> 
> Prefer this style for multi line kernel doc:
> 
> /**
>  * @foo: some description...
>  */
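> 
> e.g. for this member:
> 
> /**
>  * @mode_sem: used to protect this group's hardware resources and
>  * ensure mutual exclusion between execution only in LR mode and
>  * execution only in DMA_FENCE mode
>  */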
> 
> Matt
> 
> > +	struct rw_semaphore mode_sem;
> > +	/** @cur_mode: current execution mode of this hw engine group */
> > +	enum xe_hw_engine_group_execution_mode cur_mode;
> > +};
> > +
> >  /**
> >   * struct xe_hw_engine - Hardware engine
> >   *
> > @@ -150,6 +179,8 @@ struct xe_hw_engine {
> >  	struct xe_hw_engine_class_intf *eclass;
> >  	/** @oa_unit: oa unit for this hw engine */
> >  	struct xe_oa_unit *oa_unit;
> > +	/** @hw_engine_group: the group of hw engines this one belongs to */
> > +	struct xe_hw_engine_group *hw_engine_group;
> >  };
> >  
> >  /**
> > -- 
> > 2.43.0
> > 
