[RFC PATCH] drm/scheduler: rework entity creation
Christian König
christian.koenig at amd.com
Thu Dec 5 12:29:49 UTC 2019
Am 05.12.19 um 12:04 schrieb Nirmoy:
> Hi Christian,
>
> I am not exactly sure about drm_sched_entity_set_priority(). I wonder
> if just changing
>
> entity->priority to ctx->override_priority should work. With this
> change drm_sched_entity_select_rq()
>
> will choose a rq based on entity->priority, which seems correct to me.
> But is this enough to fix the old bug you were
>
> talking about, which messed up already scheduled jobs on priority change?
Yes, that should perfectly do it.
>
> Okay, I just realized I need a lock to make sure
>
> drm_sched_entity_set_priority() and drm_sched_entity_select_rq()
> don't happen at the same time.
Yeah, you probably need to grab the lock and make sure that you get the
priority to use while holding the lock as well.
Regards,
Christian.
>
>
> Regards,
>
> Nirmoy
>
>
> On 12/5/19 11:52 AM, Nirmoy Das wrote:
>> Entity currently keeps a copy of the run_queue list and modifies it in
>> drm_sched_entity_set_priority(). Entities shouldn't modify the run_queue
>> list. Use a drm_gpu_scheduler list instead of a drm_sched_rq list
>> in the drm_sched_entity struct. In this way we can select a runqueue based
>> on the entity/ctx's priority for a drm scheduler.
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 7 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 7 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 7 +--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 14 +++--
>>  drivers/gpu/drm/etnaviv/etnaviv_drv.c   | 8 +--
>>  drivers/gpu/drm/lima/lima_sched.c       | 5 +-
>>  drivers/gpu/drm/panfrost/panfrost_job.c | 7 +--
>>  drivers/gpu/drm/scheduler/sched_entity.c | 65 +++++++++---------------
>>  drivers/gpu/drm/v3d/v3d_drv.c           | 7 +--
>>  include/drm/gpu_scheduler.h             | 9 ++--
>>  11 files changed, 69 insertions(+), 74 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> index a0d3d7b756eb..e8f46c13d073 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> @@ -122,7 +122,7 @@ static int amdgpu_ctx_init(struct amdgpu_device
>> *adev,
>> Â Â Â Â Â Â for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
>> Â Â Â Â Â Â Â Â Â struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
>> -Â Â Â Â Â Â Â struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
>> +Â Â Â Â Â Â Â struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>> Â Â Â Â Â Â Â Â Â unsigned num_rings = 0;
>> Â Â Â Â Â Â Â Â Â unsigned num_rqs = 0;
>> Â @@ -181,12 +181,13 @@ static int amdgpu_ctx_init(struct
>> amdgpu_device *adev,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â if (!rings[j]->adev)
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â continue;
>> Â -Â Â Â Â Â Â Â Â Â Â Â rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
>> +Â Â Â Â Â Â Â Â Â Â Â sched_list[num_rqs++] = &rings[j]->sched;
>> Â Â Â Â Â Â Â Â Â }
>> Â Â Â Â Â Â Â Â Â Â for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
>> Â Â Â Â Â Â Â Â Â Â Â Â Â r = drm_sched_entity_init(&ctx->entities[i][j].entity,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â rqs, num_rqs, &ctx->guilty);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â sched_list, num_rqs,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â &ctx->guilty, priority);
>> Â Â Â Â Â Â Â Â Â if (r)
>> Â Â Â Â Â Â Â Â Â Â Â Â Â goto error_cleanup_entities;
>> Â Â Â Â Â }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 19ffe00d9072..a960dd7c0711 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -1957,11 +1957,12 @@ void
>> amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool
>> enable)
>> Â Â Â Â Â Â if (enable) {
>> Â Â Â Â Â Â Â Â Â struct amdgpu_ring *ring;
>> -Â Â Â Â Â Â Â struct drm_sched_rq *rq;
>> +Â Â Â Â Â Â Â struct drm_gpu_scheduler *sched;
>> Â Â Â Â Â Â Â Â Â Â ring = adev->mman.buffer_funcs_ring;
>> -Â Â Â Â Â Â Â rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
>> -Â Â Â Â Â Â Â r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
>> +Â Â Â Â Â Â Â sched = &ring->sched;
>> +Â Â Â Â Â Â Â r = drm_sched_entity_init(&adev->mman.entity, &sched,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, NULL, DRM_SCHED_PRIORITY_KERNEL);
>> Â Â Â Â Â Â Â Â Â if (r) {
>> Â Â Â Â Â Â Â Â Â Â Â Â Â DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â r);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index e324bfe6c58f..b803a8882864 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
>> Â int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
>> Â {
>> Â Â Â Â Â struct amdgpu_ring *ring;
>> -Â Â Â struct drm_sched_rq *rq;
>> +Â Â Â struct drm_gpu_scheduler *sched;
>> Â Â Â Â Â int r;
>> Â Â Â Â Â Â ring = &adev->uvd.inst[0].ring;
>> -Â Â Â rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>> -Â Â Â r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
>> +Â Â Â sched = &ring->sched;
>> +Â Â Â r = drm_sched_entity_init(&adev->uvd.entity, &sched,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
>> Â Â Â Â Â if (r) {
>> Â Â Â Â Â Â Â Â Â DRM_ERROR("Failed setting up UVD kernel entity.\n");
>> Â Â Â Â Â Â Â Â Â return r;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> index 46b590af2fd2..b44f28d44fb4 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> @@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
>> Â int amdgpu_vce_entity_init(struct amdgpu_device *adev)
>> Â {
>> Â Â Â Â Â struct amdgpu_ring *ring;
>> -Â Â Â struct drm_sched_rq *rq;
>> +Â Â Â struct drm_gpu_scheduler *sched;
>> Â Â Â Â Â int r;
>> Â Â Â Â Â Â ring = &adev->vce.ring[0];
>> -Â Â Â rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>> -Â Â Â r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
>> +Â Â Â sched = &ring->sched;
>> +Â Â Â r = drm_sched_entity_init(&adev->vce.entity, &sched,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
>> Â Â Â Â Â if (r != 0) {
>> Â Â Â Â Â Â Â Â Â DRM_ERROR("Failed setting up VCE run queue.\n");
>> Â Â Â Â Â Â Â Â Â return r;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index a94c4faa5af1..ec6141773a92 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -2687,6 +2687,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev,
>> struct amdgpu_vm *vm,
>> Â {
>> Â Â Â Â Â struct amdgpu_bo_param bp;
>> Â Â Â Â Â struct amdgpu_bo *root;
>> +Â Â Â Â Â Â Â struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>> Â Â Â Â Â int r, i;
>> Â Â Â Â Â Â vm->va = RB_ROOT_CACHED;
>> @@ -2700,14 +2701,19 @@ int amdgpu_vm_init(struct amdgpu_device
>> *adev, struct amdgpu_vm *vm,
>> Â Â Â Â Â spin_lock_init(&vm->invalidated_lock);
>> Â Â Â Â Â INIT_LIST_HEAD(&vm->freed);
>> Â +Â Â Â Â Â Â Â for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
>> +Â Â Â Â Â Â Â Â Â Â sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
>> +
>> Â Â Â Â Â /* create scheduler entities for page table updates */
>> -Â Â Â r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â adev->vm_manager.vm_pte_num_rqs, NULL);
>> +Â Â Â r = drm_sched_entity_init(&vm->direct, sched_list,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â adev->vm_manager.vm_pte_num_rqs,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â NULL, DRM_SCHED_PRIORITY_KERNEL);
>> Â Â Â Â Â if (r)
>> Â Â Â Â Â Â Â Â Â return r;
>> Â -Â Â Â r = drm_sched_entity_init(&vm->delayed,
>> adev->vm_manager.vm_pte_rqs,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â adev->vm_manager.vm_pte_num_rqs, NULL);
>> +Â Â Â r = drm_sched_entity_init(&vm->delayed, sched_list,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â adev->vm_manager.vm_pte_num_rqs,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â NULL, DRM_SCHED_PRIORITY_KERNEL);
>> Â Â Â Â Â if (r)
>> Â Â Â Â Â Â Â Â Â goto error_free_direct;
>> Â diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
>> b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
>> index 1f9c01be40d7..a65c1e115e35 100644
>> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
>> @@ -65,12 +65,12 @@ static int etnaviv_open(struct drm_device *dev,
>> struct drm_file *file)
>> Â Â Â Â Â Â for (i = 0; i < ETNA_MAX_PIPES; i++) {
>> Â Â Â Â Â Â Â Â Â struct etnaviv_gpu *gpu = priv->gpu[i];
>> -Â Â Â Â Â Â Â struct drm_sched_rq *rq;
>> +Â Â Â Â Â Â Â struct drm_gpu_scheduler *sched;
>> Â Â Â Â Â Â Â Â Â Â if (gpu) {
>> -Â Â Â Â Â Â Â Â Â Â Â rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>> -Â Â Â Â Â Â Â Â Â Â Â drm_sched_entity_init(&ctx->sched_entity[i],
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â &rq, 1, NULL);
>> +Â Â Â Â Â Â Â Â Â Â Â sched = &gpu->sched;
>> +Â Â Â Â Â Â Â Â Â Â Â drm_sched_entity_init(&ctx->sched_entity[i], &sched,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
>> Â Â Â Â Â Â Â Â Â Â Â Â Â }
>> Â Â Â Â Â }
>> Â diff --git a/drivers/gpu/drm/lima/lima_sched.c
>> b/drivers/gpu/drm/lima/lima_sched.c
>> index f522c5f99729..a7e53878d841 100644
>> --- a/drivers/gpu/drm/lima/lima_sched.c
>> +++ b/drivers/gpu/drm/lima/lima_sched.c
>> @@ -159,9 +159,10 @@ int lima_sched_context_init(struct
>> lima_sched_pipe *pipe,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct lima_sched_context *context,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â atomic_t *guilty)
>> Â {
>> -Â Â Â struct drm_sched_rq *rq = pipe->base.sched_rq +
>> DRM_SCHED_PRIORITY_NORMAL;
>> +Â Â Â struct drm_gpu_scheduler *sched = &pipe->base;
>> Â -Â Â Â return drm_sched_entity_init(&context->base, &rq, 1, guilty);
>> +Â Â Â return drm_sched_entity_init(&context->base, &sched,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, guilty, DRM_SCHED_PRIORITY_NORMAL);
>> Â }
>> Â Â void lima_sched_context_fini(struct lima_sched_pipe *pipe,
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c
>> b/drivers/gpu/drm/panfrost/panfrost_job.c
>> index d411eb6c8eb9..84178bcf35c9 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
>> @@ -542,12 +542,13 @@ int panfrost_job_open(struct panfrost_file_priv
>> *panfrost_priv)
>> Â {
>> Â Â Â Â Â struct panfrost_device *pfdev = panfrost_priv->pfdev;
>> Â Â Â Â Â struct panfrost_job_slot *js = pfdev->js;
>> -Â Â Â struct drm_sched_rq *rq;
>> +Â Â Â struct drm_gpu_scheduler *sched;
>> Â Â Â Â Â int ret, i;
>> Â Â Â Â Â Â for (i = 0; i < NUM_JOB_SLOTS; i++) {
>> -Â Â Â Â Â Â Â rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>> -Â Â Â Â Â Â Â ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
>> &rq, 1, NULL);
>> +Â Â Â Â Â Â Â sched = &js->queue[i].sched;
>> +Â Â Â Â Â Â Â ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â &sched, 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
>> Â Â Â Â Â Â Â Â Â if (WARN_ON(ret))
>> Â Â Â Â Â Â Â Â Â Â Â Â Â return ret;
>> Â Â Â Â Â }
>> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c
>> b/drivers/gpu/drm/scheduler/sched_entity.c
>> index 461a7a8129f4..e10d37266836 100644
>> --- a/drivers/gpu/drm/scheduler/sched_entity.c
>> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
>> @@ -38,9 +38,9 @@
>> Â Â * submit to HW ring.
>> Â Â *
>> Â Â * @entity: scheduler entity to init
>> - * @rq_list: the list of run queue on which jobs from this
>> + * @sched_list: the list of drm scheds on which jobs from this
>> Â Â *Â Â Â Â Â Â Â Â Â Â entity can be submitted
>> - * @num_rq_list: number of run queue in rq_list
>> + * @num_sched_list: number of drm sched in sched_list
>> Â Â * @guilty: atomic_t set to 1 when a job on this queue
>> Â Â *Â Â Â Â Â Â Â Â Â is found to be guilty causing a timeout
>> Â Â *
>> @@ -50,32 +50,34 @@
>> Â Â * Returns 0 on success or a negative error code on failure.
>> Â Â */
>> Â int drm_sched_entity_init(struct drm_sched_entity *entity,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â struct drm_sched_rq **rq_list,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â unsigned int num_rq_list,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â atomic_t *guilty)
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â struct drm_gpu_scheduler **sched_list,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â unsigned int num_sched_list,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â atomic_t *guilty, enum drm_sched_priority priority)
>> Â {
>> Â Â Â Â Â int i;
>> Â -Â Â Â if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
>> +Â Â Â if (!(entity && sched_list && (num_sched_list == 0 ||
>> sched_list[0])))
>> Â Â Â Â Â Â Â Â Â return -EINVAL;
>> Â Â Â Â Â Â memset(entity, 0, sizeof(struct drm_sched_entity));
>> Â Â Â Â Â INIT_LIST_HEAD(&entity->list);
>> Â Â Â Â Â entity->rq = NULL;
>> Â Â Â Â Â entity->guilty = guilty;
>> -Â Â Â entity->num_rq_list = num_rq_list;
>> -Â Â Â entity->rq_list = kcalloc(num_rq_list, sizeof(struct
>> drm_sched_rq *),
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â GFP_KERNEL);
>> -Â Â Â if (!entity->rq_list)
>> +Â Â Â entity->num_sched_list = num_sched_list;
>> +Â Â Â entity->priority = priority;
>> +Â Â Â entity->sched_list =Â kcalloc(num_sched_list,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
>> +
>> +Â Â Â if(!entity->sched_list)
>> Â Â Â Â Â Â Â Â Â return -ENOMEM;
>> Â Â Â Â Â Â init_completion(&entity->entity_idle);
>> Â -Â Â Â for (i = 0; i < num_rq_list; ++i)
>> -Â Â Â Â Â Â Â entity->rq_list[i] = rq_list[i];
>> +Â Â Â for (i = 0; i < num_sched_list; i++)
>> +Â Â Â Â Â Â Â entity->sched_list[i] = sched_list[i];
>> Â -Â Â Â if (num_rq_list)
>> -Â Â Â Â Â Â Â entity->rq = rq_list[0];
>> +Â Â Â if (num_sched_list)
>> +Â Â Â Â Â Â Â entity->rq =
>> &entity->sched_list[0]->sched_rq[entity->priority];
>> Â Â Â Â Â Â entity->last_scheduled = NULL;
>> Â @@ -139,10 +141,10 @@ drm_sched_entity_get_free_sched(struct
>> drm_sched_entity *entity)
>> Â Â Â Â Â unsigned int min_jobs = UINT_MAX, num_jobs;
>> Â Â Â Â Â int i;
>> Â -Â Â Â for (i = 0; i < entity->num_rq_list; ++i) {
>> -Â Â Â Â Â Â Â struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
>> +Â Â Â for (i = 0; i < entity->num_sched_list; ++i) {
>> +Â Â Â Â Â Â Â struct drm_gpu_scheduler *sched = entity->sched_list[i];
>> Â -Â Â Â Â Â Â Â if (!entity->rq_list[i]->sched->ready) {
>> +Â Â Â Â Â Â Â if (!entity->sched_list[i]->ready) {
>> Â Â Â Â Â Â Â Â Â Â Â Â Â DRM_WARN("sched%s is not ready, skipping", sched->name);
>> Â Â Â Â Â Â Â Â Â Â Â Â Â continue;
>> Â Â Â Â Â Â Â Â Â }
>> @@ -150,7 +152,7 @@ drm_sched_entity_get_free_sched(struct
>> drm_sched_entity *entity)
>> Â Â Â Â Â Â Â Â Â num_jobs = atomic_read(&sched->num_jobs);
>> Â Â Â Â Â Â Â Â Â if (num_jobs < min_jobs) {
>> Â Â Â Â Â Â Â Â Â Â Â Â Â min_jobs = num_jobs;
>> -Â Â Â Â Â Â Â Â Â Â Â rq = entity->rq_list[i];
>> +Â Â Â Â Â Â Â Â Â Â Â rq = &entity->sched_list[i]->sched_rq[entity->priority];
>> Â Â Â Â Â Â Â Â Â }
>> Â Â Â Â Â }
>> Â @@ -308,7 +310,7 @@ void drm_sched_entity_fini(struct
>> drm_sched_entity *entity)
>> Â Â Â Â Â Â dma_fence_put(entity->last_scheduled);
>> Â Â Â Â Â entity->last_scheduled = NULL;
>> -Â Â Â kfree(entity->rq_list);
>> +Â Â Â kfree(entity->sched_list);
>> Â }
>> Â EXPORT_SYMBOL(drm_sched_entity_fini);
>> Â @@ -353,15 +355,6 @@ static void drm_sched_entity_wakeup(struct
>> dma_fence *f,
>> Â Â Â Â Â drm_sched_wakeup(entity->rq->sched);
>> Â }
>> Â -/**
>> - * drm_sched_entity_set_rq_priority - helper for
>> drm_sched_entity_set_priority
>> - */
>> -static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â enum drm_sched_priority priority)
>> -{
>> -Â Â Â *rq = &(*rq)->sched->sched_rq[priority];
>> -}
>> -
>> Â /**
>> Â Â * drm_sched_entity_set_priority - Sets priority of the entity
>> Â Â *
>> @@ -373,20 +366,8 @@ static void
>> drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
>> Â void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â enum drm_sched_priority priority)
>> Â {
>> -Â Â Â unsigned int i;
>> -
>> -Â Â Â spin_lock(&entity->rq_lock);
>> Â -Â Â Â for (i = 0; i < entity->num_rq_list; ++i)
>> - drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
>> -
>> -Â Â Â if (entity->rq) {
>> -Â Â Â Â Â Â Â drm_sched_rq_remove_entity(entity->rq, entity);
>> -Â Â Â Â Â Â Â drm_sched_entity_set_rq_priority(&entity->rq, priority);
>> -Â Â Â Â Â Â Â drm_sched_rq_add_entity(entity->rq, entity);
>> -Â Â Â }
>> -
>> -Â Â Â spin_unlock(&entity->rq_lock);
>> +Â Â Â entity->priority = priority;
>> Â }
>> Â EXPORT_SYMBOL(drm_sched_entity_set_priority);
>> Â @@ -490,7 +471,7 @@ void drm_sched_entity_select_rq(struct
>> drm_sched_entity *entity)
>> Â Â Â Â Â struct dma_fence *fence;
>> Â Â Â Â Â struct drm_sched_rq *rq;
>> Â -Â Â Â if (spsc_queue_count(&entity->job_queue) ||
>> entity->num_rq_list <= 1)
>> +Â Â Â if (spsc_queue_count(&entity->job_queue) ||
>> entity->num_sched_list <= 1)
>> Â Â Â Â Â Â Â Â Â return;
>> Â Â Â Â Â Â fence = READ_ONCE(entity->last_scheduled);
>> diff --git a/drivers/gpu/drm/v3d/v3d_drv.c
>> b/drivers/gpu/drm/v3d/v3d_drv.c
>> index 1a07462b4528..c6aff1aedd27 100644
>> --- a/drivers/gpu/drm/v3d/v3d_drv.c
>> +++ b/drivers/gpu/drm/v3d/v3d_drv.c
>> @@ -140,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file
>> *file)
>> Â {
>> Â Â Â Â Â struct v3d_dev *v3d = to_v3d_dev(dev);
>> Â Â Â Â Â struct v3d_file_priv *v3d_priv;
>> -Â Â Â struct drm_sched_rq *rq;
>> +Â Â Â struct drm_gpu_scheduler *sched;
>> Â Â Â Â Â int i;
>> Â Â Â Â Â Â v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
>> @@ -150,8 +150,9 @@ v3d_open(struct drm_device *dev, struct drm_file
>> *file)
>> Â Â Â Â Â v3d_priv->v3d = v3d;
>> Â Â Â Â Â Â for (i = 0; i < V3D_MAX_QUEUES; i++) {
>> -Â Â Â Â Â Â Â rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>> - drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
>> +Â Â Â Â Â Â Â sched = &v3d->queue[i].sched;
>> + drm_sched_entity_init(&v3d_priv->sched_entity[i], &sched,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
>> Â Â Â Â Â }
>> Â Â Â Â Â Â file->driver_priv = v3d_priv;
>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>> index 684692a8ed76..9df322dfac30 100644
>> --- a/include/drm/gpu_scheduler.h
>> +++ b/include/drm/gpu_scheduler.h
>> @@ -81,8 +81,9 @@ enum drm_sched_priority {
>> Â struct drm_sched_entity {
>>      struct list_head       list;
>>      struct drm_sched_rq       *rq;
>> -   struct drm_sched_rq       **rq_list;
>> -   unsigned int                   num_rq_list;
>> +   unsigned int                   num_sched_list;
>> +   struct drm_gpu_scheduler       **sched_list;
>> +   enum drm_sched_priority        priority;
>>      spinlock_t           rq_lock;
>>       struct spsc_queue       job_queue;
>> @@ -312,9 +313,9 @@ void drm_sched_rq_remove_entity(struct
>> drm_sched_rq *rq,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct drm_sched_entity *entity);
>> Â Â int drm_sched_entity_init(struct drm_sched_entity *entity,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â struct drm_sched_rq **rq_list,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â struct drm_gpu_scheduler **sched_list,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â unsigned int num_rq_list,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â atomic_t *guilty);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â atomic_t *guilty, enum drm_sched_priority priority);
>> Â long drm_sched_entity_flush(struct drm_sched_entity *entity, long
>> timeout);
>> Â void drm_sched_entity_fini(struct drm_sched_entity *entity);
>> Â void drm_sched_entity_destroy(struct drm_sched_entity *entity);
More information about the amd-gfx
mailing list