[PATCH 2/2] drm/amdkfd: Use kfd_unlock_pdd helper
Daniel Phillips
daniel.phillips at amd.com
Wed Aug 24 20:01:32 UTC 2022
Trivially convert the call sites that pair kfd_lock_pdd_by_id with a
bare mutex_unlock(&p->mutex) to use the kfd_unlock_pdd helper instead.
No functional change.
Signed-off-by: Daniel Phillips <daniel.phillips at amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 38 +++++++++++-------------
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index bb5528c55b73..cf5bfd928b69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -381,7 +381,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
*/
args->doorbell_offset |= doorbell_offset_in_process;
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
pr_debug("Queue id %d was created successfully\n", args->queue_id);
@@ -401,7 +401,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
err_bind_process:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
return err;
}
@@ -587,8 +587,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
err = -EINVAL;
out:
- mutex_unlock(&p->mutex);
-
+ kfd_unlock_pdd(pdd);
return err;
}
@@ -612,8 +611,7 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
out:
- mutex_unlock(&p->mutex);
-
+ kfd_unlock_pdd(pdd);
return err;
}
@@ -650,7 +648,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
if (pdd) {
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
/* Reading GPU clock counter from KGD */
args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
} else
@@ -883,7 +881,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
pdd->qpd.sh_hidden_private_base = args->va_addr;
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
@@ -893,7 +891,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
return 0;
bind_process_to_device_fail:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
return err;
}
@@ -967,13 +965,13 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
goto err_unlock;
/* On success, the PDD keeps the drm_file reference */
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
return 0;
err_unlock:
err_drm_file:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
fput(drm_file);
return ret;
}
@@ -1095,7 +1093,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
args->mmap_offset = offset;
@@ -1114,7 +1112,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
pdd->drm_priv, NULL);
err_unlock:
err_large_bar:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
return err;
}
@@ -1256,7 +1254,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
@@ -1278,7 +1276,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
copy_from_user_failed:
get_process_device_data_failed:
sync_memory_failed:
@@ -1346,7 +1344,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
if (kfd_flush_tlb_after_unmap(pdd->dev)) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
@@ -1370,7 +1368,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
bind_process_to_device_failed:
copy_from_user_failed:
sync_memory_failed:
@@ -1512,7 +1510,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
goto err_free;
}
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1523,7 +1521,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
err_pdd:
dma_buf_put(dmabuf);
return r;
@@ -1540,7 +1538,7 @@ static int kfd_ioctl_smi_events(struct file *filep,
if (!pdd)
return -EINVAL;
- mutex_unlock(&p->mutex);
+ kfd_unlock_pdd(pdd);
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}
--
2.35.1
More information about the amd-gfx
mailing list