[PATCH 3/4] drm/xe: Add exec_queue.sched_props.priority

Welty, Brian <brian.welty at intel.com>
Wed Jan 3 18:15:43 UTC 2024



On 1/3/2024 12:11 AM, Matthew Brost wrote:
> On Tue, Jan 02, 2024 at 01:17:30PM -0800, Brian Welty wrote:
>> The purpose here is to allow optimizing exec_queue_set_priority() in
>> a follow-on patch.  Currently it does q->ops->set_priority(...), but
>> we'd like to apply exec_queue_user_extensions much earlier, and
>> q->ops cannot be called before __xe_exec_queue_init().
>>
>> It is much more efficient to instead only have to set
>> q->sched_props.priority when applying user extensions.  That value will
>> then already hold the user-requested value, so the setting of the
>> default value is moved from q->ops->init() to __xe_exec_queue_alloc().
>>
>> Signed-off-by: Brian Welty <brian.welty at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_exec_queue.c       | 1 +
>>   drivers/gpu/drm/xe/xe_exec_queue_types.h | 4 ++--
>>   drivers/gpu/drm/xe/xe_guc_submit.c       | 7 +++----
>>   drivers/gpu/drm/xe/xe_migrate.c          | 2 +-
>>   4 files changed, 7 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
>> index e78b13845417..9891cddba71c 100644
>> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
>> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
>> @@ -67,6 +67,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
>>   				hwe->eclass->sched_props.preempt_timeout_us;
>>   	q->sched_props.job_timeout_ms =
>>   				hwe->eclass->sched_props.job_timeout_ms;
>> +	q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
>>   
>>   	if (xe_exec_queue_is_parallel(q)) {
>>   		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
>> diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>> index 882eb5373980..6ae4f4e2ddca 100644
>> --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
>> +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>> @@ -52,8 +52,6 @@ struct xe_exec_queue {
>>   	struct xe_vm *vm;
>>   	/** @class: class of this exec queue */
>>   	enum xe_engine_class class;
>> -	/** @priority: priority of this exec queue */
>> -	enum xe_exec_queue_priority priority;
>>   	/**
>>   	 * @logical_mask: logical mask of where job submitted to exec queue can run
>>   	 */
>> @@ -144,6 +142,8 @@ struct xe_exec_queue {
>>   		u32 preempt_timeout_us;
>>   		/** @job_timeout_ms: job timeout in milliseconds */
>>   		u32 job_timeout_ms;
>> +		/** @priority: priority of this exec queue */
>> +		enum xe_exec_queue_priority priority;
>>   	} sched_props;
>>   
>>   	/** @compute: compute exec queue state */
>> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
>> index 6cbf41ad9c8c..0bd79089b633 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
>> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
>> @@ -421,7 +421,7 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
>>   {
>>   	struct exec_queue_policy policy;
>>   	struct xe_device *xe = guc_to_xe(guc);
>> -	enum xe_exec_queue_priority prio = q->priority;
>> +	enum xe_exec_queue_priority prio = q->sched_props.priority;
>>   	u32 timeslice_us = q->sched_props.timeslice_us;
>>   	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
>>   
>> @@ -1231,7 +1231,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
>>   	err = xe_sched_entity_init(&ge->entity, sched);
>>   	if (err)
>>   		goto err_sched;
>> -	q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
>>   
>>   	if (xe_exec_queue_is_lr(q))
>>   		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
>> @@ -1301,7 +1300,7 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
>>   {
>>   	struct xe_sched_msg *msg;
>>   
>> -	if (q->priority == priority || exec_queue_killed_or_banned(q))
>> +	if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
>>   		return 0;
>>   
>>   	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
>> @@ -1309,7 +1308,7 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
>>   		return -ENOMEM;
>>   
>>   	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
>> -	q->priority = priority;
>> +	q->sched_props.priority = priority;
> 
> Patch LGTM, but I noticed this is an existing bug - the priority should
> be changed before calling guc_exec_queue_add_msg(). Can you fix that in
> this series too?
> 

I see that now.  I'll include a fix for that.
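
Roughly, the fix I have in mind is just swapping the two lines in
guc_exec_queue_set_priority() (untested sketch, on top of this patch):

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Update the cached value before queueing the message so the
	 * message handler programs the new priority, not the old one.
	 */
	q->sched_props.priority = priority;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;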

Please see question below...

> Will give an RB on this patch in next rev.
> 
> Matt
> 
>>   
>>   	return 0;
>>   }
>> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
>> index adf1dab5eba2..6567abcb3c6d 100644
>> --- a/drivers/gpu/drm/xe/xe_migrate.c
>> +++ b/drivers/gpu/drm/xe/xe_migrate.c
>> @@ -356,7 +356,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
>>   		return ERR_CAST(m->q);
>>   	}
>>   	if (xe->info.has_usm)
>> -		m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
>> +		m->q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;

I wondered: is the above the correct way to adjust the priority?

Should it instead do:
    m->q->ops->set_priority(m->q, XE_EXEC_QUEUE_PRIORITY_KERNEL);
so that the GuC is aware?
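
For reference, what I mean is roughly the following (untested sketch; the
err variable and error path here are hypothetical, since set_priority can
fail and xe_migrate_init() would need to handle that):

    if (xe->info.has_usm) {
        err = m->q->ops->set_priority(m->q, XE_EXEC_QUEUE_PRIORITY_KERNEL);
        if (err)
            return ERR_PTR(err);
    }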


>>   
>>   	mutex_init(&m->job_mutex);
>>   
>> -- 
>> 2.43.0
>>

