[PATCH 2/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation

Nirmoy nirmodas at amd.com
Tue Mar 10 13:22:19 UTC 2020


On 3/10/20 2:00 PM, Christian König wrote:
> On 10.03.20 at 13:24, Nirmoy Das wrote:
>> Move amdgpu_ctx_init_sched(), which initializes the struct
>> drm_gpu_scheduler arrays, to amdgpu_ring.c.
>
> Moving the code around is a start, but it doesn't buy us much.


Agreed.


> We could go for the big cleanup or at least move the individual 
> scheduler arrays from the per IP structures into amdgpu_device.c

I will go for the big cleanup by adding priority as a parameter to 
amdgpu_ring_init().
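
Roughly what I have in mind (untested sketch only -- the parameter name
hw_prio and exactly where it gets consumed are placeholders until I have
wired it through all the IP blocks):

    /* amdgpu_ring.h: let each ring carry its priority from init time */
    int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                         unsigned max_dw, struct amdgpu_irq_src *irq_src,
                         unsigned irq_type, unsigned int hw_prio);

    /* callers, e.g. the gfx9 compute ring setup, would then pass
     * AMDGPU_GFX_PIPE_PRIO_HIGH or AMDGPU_GFX_PIPE_PRIO_NORMAL here
     * instead of setting ring->has_high_prio after the fact */

With that, amdgpu_ring_init() can sort each ring's sched into the right
per-priority array itself, and the per-IP copies of this logic can go away.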


>
> How much time can and do you want to spend on it?
>
> Christian.
>
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    | 75 -------------------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h    |  3 -
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c   | 85 ++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h   |  2 +
>>   5 files changed, 88 insertions(+), 79 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> index fa575bdc03c8..06d151c0fe4e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> @@ -661,78 +661,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
>>       idr_destroy(&mgr->ctx_handles);
>>       mutex_destroy(&mgr->lock);
>>   }
>> -
>> -
>> -static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
>> -{
>> -    int num_compute_sched_normal = 0;
>> -    int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
>> -    int i;
>> -
>> -    /* use one drm sched array, gfx.compute_sched to store both high and
>> -     * normal priority drm compute schedulers */
>> -    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
>> -        if (!adev->gfx.compute_ring[i].has_high_prio)
>> -            adev->gfx.compute_sched[num_compute_sched_normal++] =
>> -                &adev->gfx.compute_ring[i].sched;
>> -        else
>> -            adev->gfx.compute_sched[num_compute_sched_high--] =
>> -                &adev->gfx.compute_ring[i].sched;
>> -    }
>> -
>> -    /* compute ring only has two priority for now */
>> -    i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
>> -    adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
>> -    adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
>> -
>> -    i = AMDGPU_GFX_PIPE_PRIO_HIGH;
>> -    if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
>> -        /* When compute has no high priority rings then use */
>> -        /* normal priority sched array */
>> -        adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
>> -        adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
>> -    } else {
>> -        adev->gfx.compute_prio_sched[i] =
>> -            &adev->gfx.compute_sched[num_compute_sched_high - 1];
>> -        adev->gfx.num_compute_sched[i] =
>> -            adev->gfx.num_compute_rings - num_compute_sched_normal;
>> -    }
>> -}
>> -
>> -void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
>> -{
>> -    int i, j;
>> -
>> -    amdgpu_ctx_init_compute_sched(adev);
>> -    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
>> -        adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
>> -        adev->gfx.num_gfx_sched++;
>> -    }
>> -
>> -    for (i = 0; i < adev->sdma.num_instances; i++) {
>> -        adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
>> -        adev->sdma.num_sdma_sched++;
>> -    }
>> -
>> -    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>> -        if (adev->vcn.harvest_config & (1 << i))
>> -            continue;
>> -        adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
>> -            &adev->vcn.inst[i].ring_dec.sched;
>> -    }
>> -
>> -    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>> -        if (adev->vcn.harvest_config & (1 << i))
>> -            continue;
>> -        for (j = 0; j < adev->vcn.num_enc_rings; ++j)
>> -            adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
>> -                &adev->vcn.inst[i].ring_enc[j].sched;
>> -    }
>> -
>> -    for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
>> -        if (adev->jpeg.harvest_config & (1 << i))
>> -            continue;
>> -        adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
>> -            &adev->jpeg.inst[i].ring_dec.sched;
>> -    }
>> -}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
>> index de490f183af2..f54e10314661 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
>> @@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
>>   long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
>>   void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
>>  
>> -void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
>> -
>> -
>>   #endif
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 572eb6ea8eab..b2a99f9fc223 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -3092,7 +3092,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>               adev->gfx.config.max_cu_per_sh,
>>               adev->gfx.cu_info.number);
>>  
>> -    amdgpu_ctx_init_sched(adev);
>> +    amdgpu_ring_init_sched(adev);
>>  
>>       adev->accel_working = true;
>>  
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
>> index a7e1d0425ed0..01faeb8b4ef2 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
>> @@ -454,3 +454,88 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
>>       ring->sched.ready = !r;
>>       return r;
>>   }
>> +
>> +static void amdgpu_ring_init_compute_sched(struct amdgpu_device *adev)
>> +{
>> +    int num_compute_sched_normal = 0;
>> +    int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
>> +    int i;
>> +
>> +    /* use one drm sched array, gfx.compute_sched to store both high and
>> +     * normal priority drm compute schedulers */
>> +    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
>> +        if (!adev->gfx.compute_ring[i].has_high_prio)
>> +            adev->gfx.compute_sched[num_compute_sched_normal++] =
>> +                &adev->gfx.compute_ring[i].sched;
>> +        else
>> +            adev->gfx.compute_sched[num_compute_sched_high--] =
>> +                &adev->gfx.compute_ring[i].sched;
>> +    }
>> +
>> +    /* compute ring only has two priorities for now */
>> +    i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
>> +    adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
>> +    adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
>> +
>> +    i = AMDGPU_GFX_PIPE_PRIO_HIGH;
>> +    if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
>> +        /* When compute has no high priority rings, use the
>> +         * normal priority sched array */
>> +        adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
>> +        adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
>> +    } else {
>> +        adev->gfx.compute_prio_sched[i] =
>> +            &adev->gfx.compute_sched[num_compute_sched_high - 1];
>> +        adev->gfx.num_compute_sched[i] =
>> +            adev->gfx.num_compute_rings - num_compute_sched_normal;
>> +    }
>> +}
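
(Side note for anyone skimming: the function above packs both priorities
into the single gfx.compute_sched array -- normal priority scheds fill it
from the front, high priority ones from the back. A tiny standalone
sketch of just that packing, with made-up ring data and nothing
amdgpu-specific:)

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RINGS 8

    int main(void)
    {
        /* pretend we have four rings and rings 1 and 3 are high prio */
        bool high_prio[4] = { false, true, false, true };
        int sched[MAX_RINGS];          /* stands in for compute_sched */
        int normal = 0, high = MAX_RINGS - 1;
        int i;

        for (i = 0; i < 4; i++) {
            if (!high_prio[i])
                sched[normal++] = i;   /* normal: fill from the front */
            else
                sched[high--] = i;     /* high: fill from the back */
        }

        /* normal entries end up in sched[0..normal-1], high prio
         * entries in sched[high+1..MAX_RINGS-1] */
        printf("normal: 0..%d, high: %d..%d\n",
               normal - 1, high + 1, MAX_RINGS - 1);
        return 0;
    }
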
>> +
>> +/**
>> + * amdgpu_ring_init_sched - populate array of drm scheds for each HW IP
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Populate an array of struct drm_gpu_scheduler pointers for each HW IP,
>> + * which can be used by amdgpu_ctx_get_entity() to initialize an entity.
>> + */
>> +void amdgpu_ring_init_sched(struct amdgpu_device *adev)
>> +{
>> +    int i, j;
>> +
>> +    amdgpu_ring_init_compute_sched(adev);
>> +    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
>> +        adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
>> +        adev->gfx.num_gfx_sched++;
>> +    }
>> +
>> +    for (i = 0; i < adev->sdma.num_instances; i++) {
>> +        adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
>> +        adev->sdma.num_sdma_sched++;
>> +    }
>> +
>> +    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>> +        if (adev->vcn.harvest_config & (1 << i))
>> +            continue;
>> +        adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
>> +            &adev->vcn.inst[i].ring_dec.sched;
>> +    }
>> +
>> +    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>> +        if (adev->vcn.harvest_config & (1 << i))
>> +            continue;
>> +        for (j = 0; j < adev->vcn.num_enc_rings; ++j)
>> +            adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
>> +                &adev->vcn.inst[i].ring_enc[j].sched;
>> +    }
>> +
>> +    for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
>> +        if (adev->jpeg.harvest_config & (1 << i))
>> +            continue;
>> +        adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
>> +            &adev->jpeg.inst[i].ring_dec.sched;
>> +    }
>> +}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
>> index 9a443013d70d..4ccd056d4353 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
>> @@ -326,4 +326,6 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
>>                    struct amdgpu_ring *ring);
>>   void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
>>  
>> +void amdgpu_ring_init_sched(struct amdgpu_device *adev);
>> +
>>   #endif
>