[PATCH 3/4] drm/amdkfd: Refactor create_queue_nocpsch
Kuehling, Felix
Felix.Kuehling at amd.com
Fri May 31 21:40:47 UTC 2019
On 2019-05-31 5:19 p.m., Zeng, Oak wrote:
> This is prepare work to fix a circular lock dependency.
> No logic change
>
> Change-Id: I4e0ee918260e7780de972dd71f4ce787b4f6dde9
> Signed-off-by: Oak Zeng <Oak.Zeng at amd.com>
> ---
> .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 172 ++++++++-------------
> 1 file changed, 62 insertions(+), 110 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 74944ec..aac7f7b 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -42,10 +42,6 @@
> static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
> unsigned int pasid, unsigned int vmid);
>
> -static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
> - struct queue *q,
> - struct qcm_process_device *qpd);
> -
> static int execute_queues_cpsch(struct device_queue_manager *dqm,
> enum kfd_unmap_queues_filter filter,
> uint32_t filter_param);
> @@ -55,15 +51,16 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
>
> static int map_queues_cpsch(struct device_queue_manager *dqm);
>
> -static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
> - struct queue *q,
> - struct qcm_process_device *qpd);
> -
> static void deallocate_sdma_queue(struct device_queue_manager *dqm,
> struct queue *q);
> static void deallocate_sdma_queue_locked(struct device_queue_manager *dqm,
> struct queue *q);
>
> +static inline void deallocate_hqd(struct device_queue_manager *dqm,
> + struct queue *q);
> +static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
> +static int allocate_sdma_queue(struct device_queue_manager *dqm,
> + struct queue *q);
> static void kfd_process_hw_exception(struct work_struct *work);
>
> static inline
> @@ -272,6 +269,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
> struct qcm_process_device *qpd)
> {
> int retval;
> + struct mqd_manager *mqd_mgr;
Reverse Christmas tree ... Put the longer declaration first. It tends to
make code more readable.
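For example, just swapping the two declarations:

	struct mqd_manager *mqd_mgr;
	int retval;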
>
> print_queue(q);
>
> @@ -302,18 +300,51 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
> q->properties.tba_addr = qpd->tba_addr;
> q->properties.tma_addr = qpd->tma_addr;
>
> + mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
> + q->properties.type)];
> if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
> - retval = create_compute_queue_nocpsch(dqm, q, qpd);
> - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
> - q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
> - retval = create_sdma_queue_nocpsch(dqm, q, qpd);
> - else
> - retval = -EINVAL;
> + {
The open brace should be on the same line as the "if". Please run
checkpatch.pl; it should find those coding style violations. This
convenient command checks the top 4 commits on your local branch without
even having to generate the patch files first: "scripts/checkpatch.pl -g
HEAD~4..HEAD".
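For example, with the brace moved up, the compute branch would read
something like:

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
	} else if (...) {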
> + retval = allocate_hqd(dqm, q);
> + if (retval)
> + goto deallocate_vmid;
> + } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
> + q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
> + retval = allocate_sdma_queue(dqm, q);
> + if (retval)
> + goto deallocate_vmid;
> + dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
> + }
>
> - if (retval) {
> - if (list_empty(&qpd->queues_list))
> - deallocate_vmid(dqm, qpd, q);
> - goto out_unlock;
> + retval = allocate_doorbell(qpd, q);
> + if (retval)
> + goto out_deallocate_hqd;
> +
> + retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
> + &q->gart_mqd_addr, &q->properties);
> + if (retval)
> + goto out_deallocate_doorbell;
> +
> + pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
> + q->pipe, q->queue);
> +
> + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
> + dqm->dev->kfd2kgd->set_scratch_backing_va(
> + dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
> +
> + if (q->properties.is_active) {
> +
> + if (WARN(q->process->mm != current->mm,
> + "should only run in user thread"))
> + retval = -EFAULT;
> + else
> + /* TODO for sdma queue load mqd was called
> + * unconditionally in original code, with mm_struct set
> + * to NULL. I am not sure whether we need to keep
> + * original logic here */
> + retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
> + &q->properties, current->mm);
> + if (retval)
> + goto out_uninit_mqd;
> }
>
> list_add(&q->list, &qpd->queues_list);
> @@ -334,6 +365,19 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
> pr_debug("Total of %d queues are accountable so far\n",
> dqm->total_queue_count);
>
> +out_uninit_mqd:
> + mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
> +out_deallocate_doorbell:
> + deallocate_doorbell(qpd, q);
> +out_deallocate_hqd:
> + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
> + deallocate_hqd(dqm, q);
> + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
> + q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
> + deallocate_sdma_queue_locked(dqm, q);
> +deallocate_vmid:
> + if (list_empty(&qpd->queues_list))
> + deallocate_vmid(dqm, qpd, q);
> out_unlock:
> dqm_unlock(dqm);
> return retval;
This looks like you always deallocate doorbells, SDMA queues etc., even
when queue creation was successful: on success the code falls straight
through into the cleanup labels because nothing jumps past them. Seems
wrong.
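A minimal sketch of one way to fix it (keeping the labels from this
patch) would be to jump over the cleanup code once the queue has been
accounted for:

	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);
	/* Success: skip the error cleanup below */
	goto out_unlock;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);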
Regards,
Felix
> @@ -379,58 +423,6 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
> dqm->allocated_queues[q->pipe] |= (1 << q->queue);
> }
>
> -static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
> - struct queue *q,
> - struct qcm_process_device *qpd)
> -{
> - struct mqd_manager *mqd_mgr;
> - int retval;
> -
> - mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
> -
> - retval = allocate_hqd(dqm, q);
> - if (retval)
> - return retval;
> -
> - retval = allocate_doorbell(qpd, q);
> - if (retval)
> - goto out_deallocate_hqd;
> -
> - retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
> - &q->gart_mqd_addr, &q->properties);
> - if (retval)
> - goto out_deallocate_doorbell;
> -
> - pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
> - q->pipe, q->queue);
> -
> - dqm->dev->kfd2kgd->set_scratch_backing_va(
> - dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
> -
> - if (!q->properties.is_active)
> - return 0;
> -
> - if (WARN(q->process->mm != current->mm,
> - "should only run in user thread"))
> - retval = -EFAULT;
> - else
> - retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
> - &q->properties, current->mm);
> - if (retval)
> - goto out_uninit_mqd;
> -
> - return 0;
> -
> -out_uninit_mqd:
> - mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
> -out_deallocate_doorbell:
> - deallocate_doorbell(qpd, q);
> -out_deallocate_hqd:
> - deallocate_hqd(dqm, q);
> -
> - return retval;
> -}
> -
> /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
> * to avoid asynchronized access
> */
> @@ -991,46 +983,6 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
> }
> }
>
> -static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
> - struct queue *q,
> - struct qcm_process_device *qpd)
> -{
> - struct mqd_manager *mqd_mgr;
> - int retval;
> -
> - mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA];
> -
> - retval = allocate_sdma_queue(dqm, q);
> - if (retval)
> - return retval;
> -
> - retval = allocate_doorbell(qpd, q);
> - if (retval)
> - goto out_deallocate_sdma_queue_locked;
> -
> - dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
> - retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
> - &q->gart_mqd_addr, &q->properties);
> - if (retval)
> - goto out_deallocate_doorbell;
> -
> - retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
> - NULL);
> - if (retval)
> - goto out_uninit_mqd;
> -
> - return 0;
> -
> -out_uninit_mqd:
> - mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
> -out_deallocate_doorbell:
> - deallocate_doorbell(qpd, q);
> -out_deallocate_sdma_queue_locked:
> - deallocate_sdma_queue_locked(dqm, q);
> -
> - return retval;
> -}
> -
> /*
> * Device Queue Manager implementation for cp scheduler
> */