[PATCH v4 09/11] drm/amdgpu: validate userq activity status for GEM_VA unmap
Alex Deucher
alexdeucher at gmail.com
Tue Jun 24 17:04:44 UTC 2025
On Tue, Jun 24, 2025 at 4:53 AM Prike Liang <Prike.Liang at amd.com> wrote:
>
> The userq VA unmap requires validating the queue status before unmapping
> it. If the user tries to unmap a busy userq via the GEM VA IOCTL, the
> driver should report an error for this illegal usage.
>
> Signed-off-by: Prike Liang <Prike.Liang at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 16 +++++++++++++---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++++++
> 2 files changed, 21 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> index 534a9c98c011..5243512c9cec 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> @@ -243,7 +243,7 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
> return r;
> }
>
> -static void
> +static int
> amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
> struct amdgpu_usermode_queue *queue)
> {
> @@ -252,10 +252,14 @@ amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
>
> if (f && !dma_fence_is_signaled(f)) {
> ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
> - if (ret <= 0)
> + if (ret <= 0) {
> drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
> f->context, f->seqno);
> + return -ETIMEDOUT;
> + }
> }
> +
> + return 0;
> }
>
> static void
> @@ -471,7 +475,13 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
> mutex_unlock(&uq_mgr->userq_mutex);
> return -EINVAL;
> }
> - amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
> +
> + if (amdgpu_userq_wait_for_last_fence(uq_mgr, queue)) {
> + drm_warn(adev_to_drm(uq_mgr->adev), "Don't destroy a busy userq\n");
> + mutex_unlock(&uq_mgr->userq_mutex);
> + return -EINVAL;
This is a memory leak. I think we want to destroy a busy queue even if
the fence doesn't signal. If it doesn't signal, we should unmap/reset
the queue and then destroy it.
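Something along these lines (untested sketch; it assumes an unmap helper
mirroring the amdgpu_userq_map_helper() visible above, and the exact
helper names may differ in the tree):

	if (amdgpu_userq_wait_for_last_fence(uq_mgr, queue)) {
		/* Queue is still busy: force it off the hardware instead
		 * of leaking it, then continue with the normal destroy
		 * path below.
		 */
		drm_warn(adev_to_drm(uq_mgr->adev),
			 "userq %d still busy, unmapping before destroy\n",
			 queue_id);
		r = amdgpu_userq_unmap_helper(uq_mgr, queue);
		if (r)
			drm_file_err(uq_mgr->file,
				     "failed to unmap busy userq %d\n",
				     queue_id);
		/* fall through: don't return -EINVAL, destroy the queue */
	}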
Alex
> + }
> +
> r = amdgpu_bo_reserve(queue->db_obj.obj, true);
> if (!r) {
> amdgpu_bo_unpin(queue->db_obj.obj);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 5e075e8f0ca3..afc493f43256 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -1929,6 +1929,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
> struct amdgpu_bo_va_mapping *mapping;
> struct amdgpu_vm *vm = bo_va->base.vm;
> bool valid = true;
> + int r;
>
> saddr /= AMDGPU_GPU_PAGE_SIZE;
>
> @@ -1949,6 +1950,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
> return -ENOENT;
> }
>
> + /* It's unlikely that the mapped userq hasn't been idled by the time
> + * user space unmaps the VA, unless the VA is forcibly unmapped from
> + * user space.
> + */
> + r = amdgpu_userq_gem_va_unmap_validate(vm, saddr);
> + if (unlikely(r && r != -EBUSY))
> + dev_warn(adev->dev, "Improper unmap request from user space\n");
> +
> list_del(&mapping->list);
> amdgpu_vm_it_remove(mapping, &vm->va);
> mapping->bo_va = NULL;
> --
> 2.34.1
>
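For context, amdgpu_userq_gem_va_unmap_validate() is introduced earlier in
this series. A hypothetical sketch of the check it performs (the helper
names below are made up for illustration and are not the series' actual
implementation):

	int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_vm *vm, u64 saddr)
	{
		/* Hypothetical: if the VA being unmapped still backs a user
		 * queue that is mapped on the hardware, report -EBUSY so the
		 * caller in amdgpu_vm_bo_unmap() can flag the bogus request.
		 */
		if (userq_va_still_mapped(vm, saddr))
			return -EBUSY;

		return 0;
	}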