[PATCH 1/1] drm/amdgpu: Use kfd_lock_pdd_by_id helper in more places
Felix Kuehling
felix.kuehling at amd.com
Mon Aug 22 18:00:47 UTC 2022
On 2022-08-22 at 11:30, Daniel Phillips wrote:
> Convert most of the "mutex_lock; kfd_process_device_data_by_id" occurrences
> in kfd_chardev to use the kfd_lock_pdd_by_id helper. The converted sites now
> consistently log debug output when the lookup fails. Sites where
> kfd_process_device_data_by_id is used without locking are not converted for now.
>
> Signed-off-by: Daniel Phillips <daniel.phillips at amd.com>
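
For reference, the helper being adopted here takes p->mutex, looks up the
pdd, and drops the mutex itself when the lookup fails, which is why the
converted error paths below can return directly without an unlock. A rough
sketch reconstructed from the first hunk below; the opening mutex_lock/lookup
and the exact parameter list are my assumption, not quoted from the patch:

static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
                                                            __u32 gpu_id)
{
        struct kfd_process_device *pdd;

        mutex_lock(&p->mutex);
        pdd = kfd_process_device_data_by_id(p, gpu_id);
        if (pdd)
                return pdd;     /* success: p->mutex stays held for the caller */

        pr_debug("Could not find gpu id 0x%x\n", gpu_id);
        mutex_unlock(&p->mutex);        /* failure: drop the lock on behalf of the caller */
        return NULL;
}
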
For consistency, you should also replace mutex_unlock with
kfd_unlock_pdd in the converted functions.
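
For example, the kfd_ioctl_smi_events() hunk at the end of the patch would
then end up looking roughly like this (a sketch; I'm assuming kfd_unlock_pdd()
is the existing counterpart that drops p->mutex via pdd->process, and the
surrounding lines are unchanged from your patch):

        pdd = kfd_lock_pdd_by_id(p, args->gpuid);
        if (!pdd)
                return -EINVAL;

        /* the lookup succeeded, so the helper left p->mutex held;
         * release it through the pdd instead of open-coding mutex_unlock
         */
        kfd_unlock_pdd(pdd);
        return kfd_smi_event_open(pdd->dev, &args->anon_fd);
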
Regards,
Felix
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 98 ++++++++----------------
> 1 file changed, 34 insertions(+), 64 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index 2b3d8bc8f0aa..bb5528c55b73 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -75,6 +75,7 @@ static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *
> if (pdd)
> return pdd;
>
> + pr_debug("Could not find gpu id 0x%x\n", gpu_id);
> mutex_unlock(&p->mutex);
> return NULL;
> }
> @@ -311,14 +312,9 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
>
> pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
>
> - mutex_lock(&p->mutex);
> -
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - if (!pdd) {
> - pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
> - err = -EINVAL;
> - goto err_pdd;
> - }
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (!pdd)
> + return -EINVAL;
> dev = pdd->dev;
>
> pdd = kfd_bind_process_to_device(dev, p);
> @@ -405,7 +401,6 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
> amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
> err_wptr_map_gart:
> err_bind_process:
> -err_pdd:
> mutex_unlock(&p->mutex);
> return err;
> }
> @@ -566,13 +561,9 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
> return -EINVAL;
> }
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - if (!pdd) {
> - pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
> - err = -EINVAL;
> - goto err_pdd;
> - }
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (!pdd)
> + return -EINVAL;
>
> pdd = kfd_bind_process_to_device(pdd->dev, p);
> if (IS_ERR(pdd)) {
> @@ -596,7 +587,6 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
> err = -EINVAL;
>
> out:
> -err_pdd:
> mutex_unlock(&p->mutex);
>
> return err;
> @@ -609,13 +599,9 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
> int err = 0;
> struct kfd_process_device *pdd;
>
> - mutex_lock(&p->mutex);
> -
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - if (!pdd) {
> - err = -EINVAL;
> - goto err_pdd;
> - }
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (!pdd)
> + return -EINVAL;
>
> pdd = kfd_bind_process_to_device(pdd->dev, p);
> if (IS_ERR(pdd)) {
> @@ -626,7 +612,6 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
> kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
>
> out:
> -err_pdd:
> mutex_unlock(&p->mutex);
>
> return err;
> @@ -663,13 +648,12 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
> struct kfd_ioctl_get_clock_counters_args *args = data;
> struct kfd_process_device *pdd;
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - mutex_unlock(&p->mutex);
> - if (pdd)
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (pdd) {
> + mutex_unlock(&p->mutex);
> /* Reading GPU clock counter from KGD */
> args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
> - else
> + } else
> /* Node without GPU resource */
> args->gpu_clock_counter = 0;
>
> @@ -886,12 +870,9 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
> struct kfd_dev *dev;
> long err;
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - if (!pdd) {
> - err = -EINVAL;
> - goto err_pdd;
> - }
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (!pdd)
> + return -EINVAL;
> dev = pdd->dev;
>
> pdd = kfd_bind_process_to_device(dev, p);
> @@ -912,7 +893,6 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
> return 0;
>
> bind_process_to_device_fail:
> -err_pdd:
> mutex_unlock(&p->mutex);
> return err;
> }
> @@ -973,12 +953,11 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
> if (!drm_file)
> return -EINVAL;
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - if (!pdd) {
> - ret = -EINVAL;
> - goto err_pdd;
> - }
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (!pdd) {
> + ret = -EINVAL;
> + goto err_pdd;
> + }
>
> if (pdd->drm_file) {
> ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
> @@ -995,7 +974,7 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
> return 0;
>
> err_unlock:
> -err_pdd:
> err_drm_file:
> mutex_unlock(&p->mutex);
> +err_pdd:
> fput(drm_file);
> @@ -1063,12 +1039,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
> }
> mutex_unlock(&p->svms.lock);
> #endif
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> - if (!pdd) {
> - err = -EINVAL;
> - goto err_pdd;
> - }
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> + if (!pdd)
> + return -EINVAL;
>
> dev = pdd->dev;
>
> @@ -1140,7 +1113,6 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
> amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
> pdd->drm_priv, NULL);
> err_unlock:
> -err_pdd:
> err_large_bar:
> mutex_unlock(&p->mutex);
> return err;
> @@ -1231,8 +1203,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
> goto copy_from_user_failed;
> }
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
> + pdd = kfd_lock_pdd_by_id(p, GET_GPU_ID(args->handle));
> if (!pdd) {
> err = -EINVAL;
> goto get_process_device_data_failed;
> @@ -1304,12 +1275,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
>
> return err;
>
> -get_process_device_data_failed:
> bind_process_to_device_failed:
> get_mem_obj_from_handle_failed:
> map_memory_to_gpu_failed:
> mutex_unlock(&p->mutex);
> copy_from_user_failed:
> +get_process_device_data_failed:
> sync_memory_failed:
> kfree(devices_arr);
>
> @@ -1347,8 +1318,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
> goto copy_from_user_failed;
> }
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
> + pdd = kfd_lock_pdd_by_id(p, GET_GPU_ID(args->handle));
> if (!pdd) {
> err = -EINVAL;
> goto bind_process_to_device_failed;
> @@ -1398,10 +1368,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
>
> return 0;
>
> -bind_process_to_device_failed:
> get_mem_obj_from_handle_failed:
> unmap_memory_from_gpu_failed:
> mutex_unlock(&p->mutex);
> +bind_process_to_device_failed:
> copy_from_user_failed:
> sync_memory_failed:
> kfree(devices_arr);
> @@ -1517,11 +1487,10 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
> if (IS_ERR(dmabuf))
> return PTR_ERR(dmabuf);
>
> - mutex_lock(&p->mutex);
> - pdd = kfd_process_device_data_by_id(p, args->gpu_id);
> + pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
> if (!pdd) {
> r = -EINVAL;
> - goto err_unlock;
> + goto err_pdd;
> }
>
> pdd = kfd_bind_process_to_device(pdd->dev, p);
> @@ -1555,6 +1524,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
> pdd->drm_priv, NULL);
> err_unlock:
> mutex_unlock(&p->mutex);
> +err_pdd:
> dma_buf_put(dmabuf);
> return r;
> }
> @@ -1566,13 +1536,11 @@ static int kfd_ioctl_smi_events(struct file *filep,
> struct kfd_ioctl_smi_events_args *args = data;
> struct kfd_process_device *pdd;
>
> - mutex_lock(&p->mutex);
> -
> - pdd = kfd_process_device_data_by_id(p, args->gpuid);
> - mutex_unlock(&p->mutex);
> + pdd = kfd_lock_pdd_by_id(p, args->gpuid);
> if (!pdd)
> return -EINVAL;
>
> + mutex_unlock(&p->mutex);
> return kfd_smi_event_open(pdd->dev, &args->anon_fd);
> }
>