[PATCH 1/2] drm/sched: Rename priority MIN to LOW

Christian König <christian.koenig at amd.com>
Mon Nov 27 13:55:26 UTC 2023


Hi Luben,

On 24.11.23 08:57, Christian König wrote:
> On 24.11.23 06:27, Luben Tuikov wrote:
>> Rename DRM_SCHED_PRIORITY_MIN to DRM_SCHED_PRIORITY_LOW.
>>
>> This mirrors DRM_SCHED_PRIORITY_HIGH, for a list of DRM scheduler
>> priorities in ascending order,
>>    DRM_SCHED_PRIORITY_LOW,
>>    DRM_SCHED_PRIORITY_NORMAL,
>>    DRM_SCHED_PRIORITY_HIGH,
>>    DRM_SCHED_PRIORITY_KERNEL.
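
Since the enum starts at 0, the priority value doubles as the index
into the scheduler's run-queue array, e.g. this context line from the
sched_entity.c hunk below:

    entity->rq = sched_list[0]->sched_rq[entity->priority];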
>>
>> Cc: Rob Clark <robdclark at gmail.com>
>> Cc: Abhinav Kumar <quic_abhinavk at quicinc.com>
>> Cc: Dmitry Baryshkov <dmitry.baryshkov at linaro.org>
>> Cc: Danilo Krummrich <dakr at redhat.com>
>> Cc: Alex Deucher <alexander.deucher at amd.com>
>> Cc: Christian König <christian.koenig at amd.com>
>> Cc: linux-arm-msm at vger.kernel.org
>> Cc: freedreno at lists.freedesktop.org
>> Cc: dri-devel at lists.freedesktop.org
>> Signed-off-by: Luben Tuikov <ltuikov89 at gmail.com>
>
> Reviewed-by: Christian König <christian.koenig at amd.com>

Looks like you missed one usage in Nouveau:

drivers/gpu/drm/nouveau/nouveau_sched.c:21:41: error: ‘DRM_SCHED_PRIORITY_MIN’ undeclared here (not in a function); did you mean ‘DRM_SCHED_PRIORITY_LOW’?
    21 |         NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_MIN,
       |                                         ^~~~~~~~~~~~~~~~~~~~~
       |                                         DRM_SCHED_PRIORITY_LOW

This now results in a build error on drm-misc-next.
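
Presumably the fixup is just the same rename there -- untested, going
only by the compiler's suggestion:

-    NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_MIN,
+    NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_LOW,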

Christian.

>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  |  4 ++--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  2 +-
>>  drivers/gpu/drm/msm/msm_gpu.h            |  2 +-
>>  drivers/gpu/drm/scheduler/sched_entity.c |  2 +-
>>  drivers/gpu/drm/scheduler/sched_main.c   | 10 +++++-----
>>  include/drm/gpu_scheduler.h              |  2 +-
>>  6 files changed, 11 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> index e2ae9ba147ba97..5cb33ac99f7089 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> @@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
>>          return DRM_SCHED_PRIORITY_NORMAL;
>>
>>      case AMDGPU_CTX_PRIORITY_VERY_LOW:
>> -        return DRM_SCHED_PRIORITY_MIN;
>> +        return DRM_SCHED_PRIORITY_LOW;
>>
>>      case AMDGPU_CTX_PRIORITY_LOW:
>> -        return DRM_SCHED_PRIORITY_MIN;
>> +        return DRM_SCHED_PRIORITY_LOW;
>>
>>      case AMDGPU_CTX_PRIORITY_NORMAL:
>>          return DRM_SCHED_PRIORITY_NORMAL;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index 62bb7fc7448ad9..1a25931607c514 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
>>      int i;
>>
>>      /* Signal all jobs not yet scheduled */
>> -    for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
>> +    for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
>>          struct drm_sched_rq *rq = sched->sched_rq[i];
>>          spin_lock(&rq->lock);
>>          list_for_each_entry(s_entity, &rq->entities, list) {
>> diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
>> index 4252e3839fbc83..eb0c97433e5f8a 100644
>> --- a/drivers/gpu/drm/msm/msm_gpu.h
>> +++ b/drivers/gpu/drm/msm/msm_gpu.h
>> @@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
>>   * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
>>   * cases, so we don't use it (no need for kernel generated jobs).
>>   */
>> -#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
>> +#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW)
>>
>>  /**
>>   * struct msm_file_private - per-drm_file context
>> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
>> index 20c9c561843ce1..cb7445be3cbb4e 100644
>> --- a/drivers/gpu/drm/scheduler/sched_entity.c
>> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
>> @@ -88,7 +88,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>>              drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
>>                  entity->priority, sched_list[0]->num_rqs);
>>              entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
>> -                         (s32) DRM_SCHED_PRIORITY_MIN);
>> +                         (s32) DRM_SCHED_PRIORITY_LOW);
>>          }
>>          entity->rq = sched_list[0]->sched_rq[entity->priority];
>>      }
>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>> index 044a8c4875ba64..b6d7bc49ff6ef4 100644
>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>> @@ -1052,7 +1052,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>>      int i;
>>
>>      /* Kernel run queue has higher priority than normal run queue*/
>> -    for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
>> +    for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
>>          entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
>>              drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
>>              drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
>> @@ -1291,7 +1291,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>>      if (!sched->sched_rq)
>>          goto Out_free;
>>      sched->num_rqs = num_rqs;
>> -    for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
>> +    for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) {
>>          sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
>>          if (!sched->sched_rq[i])
>>              goto Out_unroll;
>> @@ -1312,7 +1312,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>>      sched->ready = true;
>>      return 0;
>>  Out_unroll:
>> -    for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
>> +    for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--)
>>          kfree(sched->sched_rq[i]);
>>  Out_free:
>>      kfree(sched->sched_rq);
>> @@ -1338,7 +1338,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
>>
>>      drm_sched_wqueue_stop(sched);
>>
>> -    for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
>> +    for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
>>          struct drm_sched_rq *rq = sched->sched_rq[i];
>>
>>          spin_lock(&rq->lock);
>> @@ -1390,7 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
>>      if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
>>          atomic_inc(&bad->karma);
>>
>> -        for (i = DRM_SCHED_PRIORITY_MIN;
>> +        for (i = DRM_SCHED_PRIORITY_LOW;
>>               i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
>>               i++) {
>>              struct drm_sched_rq *rq = sched->sched_rq[i];
>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>> index 9a50348bd5c04e..d8e2d84d9223e3 100644
>> --- a/include/drm/gpu_scheduler.h
>> +++ b/include/drm/gpu_scheduler.h
>> @@ -63,7 +63,7 @@ struct drm_file;
>>   * to an array, and as such should start at 0.
>>   */
>>  enum drm_sched_priority {
>> -    DRM_SCHED_PRIORITY_MIN,
>> +    DRM_SCHED_PRIORITY_LOW,
>>      DRM_SCHED_PRIORITY_NORMAL,
>>      DRM_SCHED_PRIORITY_HIGH,
>>      DRM_SCHED_PRIORITY_KERNEL,
>