[PATCH v2 05/11] drm/amdgpu: add userq object va track helpers

Liang, Prike Prike.Liang at amd.com
Thu Jun 19 12:49:41 UTC 2025


[Public]

Regards,
      Prike

> -----Original Message-----
> From: Alex Deucher <alexdeucher at gmail.com>
> Sent: Tuesday, June 17, 2025 11:35 PM
> To: Liang, Prike <Prike.Liang at amd.com>
> Cc: amd-gfx at lists.freedesktop.org; Deucher, Alexander
> <Alexander.Deucher at amd.com>; Koenig, Christian <Christian.Koenig at amd.com>
> Subject: Re: [PATCH v2 05/11] drm/amdgpu: add userq object va track helpers
>
> On Tue, Jun 17, 2025 at 4:27 AM Prike Liang <Prike.Liang at amd.com> wrote:
> >
> > Add the userq object virtual address get(), mapped() and put() helpers
> > for tracking the userq object's VA address usage.
>
> Need to add the other metadata buffers to the tracking.
Noted — will add the other metadata buffers to the tracking.

> >
> > Signed-off-by: Prike Liang <Prike.Liang at amd.com>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 114
> > +++++++++++++++++++++-  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |
> > 11 +++
> >  2 files changed, 124 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> > index db47e90b8c83..2882ef8fe719 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> > @@ -76,6 +76,116 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm
> *vm, u64 addr,
> >         return -EINVAL;
> >  }
> >
> > +int amdgpu_userq_buffer_va_get(struct amdgpu_vm *vm, u64 addr) {
> > +       struct amdgpu_bo_va_mapping *mapping;
> > +       u64 user_addr;
> > +       int r;
> > +
> > +       user_addr = amdgpu_userq_va_align(addr);
> > +       r = amdgpu_bo_reserve(vm->root.bo, false);
> > +       if (r)
> > +               return r;
> > +
> > +       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
> > +       if (!mapping)
> > +               goto out_err;
> > +
> > +       /*
> > +        * Need to unify the following userq va reference.
> > +        *  mqd  bo
> > +        *  rptr bo
> > +        *  wptr bo
> > +        *  eop  bo
> > +        *  doorbell bo
> > +        */
> > +       /*amdgpu_bo_ref(mapping->bo_va->base.bo);*/
> > +       mapping->bo_va->queue_refcount++;
> > +
> > +       amdgpu_bo_unreserve(vm->root.bo);
> > +       return 0;
> > +
> > +out_err:
> > +       amdgpu_bo_unreserve(vm->root.bo);
> > +       return -EINVAL;
> > +}
> > +
> > +bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr) {
> > +       struct amdgpu_bo_va_mapping *mapping;
> > +       u64 user_addr;
> > +       bool r;
> > +
> > +       user_addr = amdgpu_userq_va_align(addr);
> > +
> > +       if (amdgpu_bo_reserve(vm->root.bo, false))
> > +               return false;
> > +
> > +       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
> > +       if (!IS_ERR_OR_NULL(mapping) && mapping->bo_va->queue_refcount > 0)
> > +               r = true;
> > +       else
> > +               r = false;
> > +       amdgpu_bo_unreserve(vm->root.bo);
> > +
> > +       return r;
> > +}
> > +
> > +bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
> > +                       struct amdgpu_usermode_queue *queue) {
> > +
> > +       if (amdgpu_userq_buffer_va_mapped(vm, queue->queue_va) ||
> > +               amdgpu_userq_buffer_va_mapped(vm, queue->rptr_va) ||
> > +               amdgpu_userq_buffer_va_mapped(vm, queue->wptr_va))
> > +               return true;
> > +       else
> > +               return false;
> > +}
> > +
> > +int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr) {
> > +       struct amdgpu_bo_va_mapping *mapping;
> > +       u64 user_addr;
> > +       int r;
> > +
> > +       user_addr = amdgpu_userq_va_align(addr);
> > +       r = amdgpu_bo_reserve(vm->root.bo, false);
> > +       if (r)
> > +               return r;
> > +
> > +       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
> > +       if (!mapping)
> > +               goto out_err;
> > +       /*
> > +        * As the GEM userq bo will be unmapped by amdgpu_vm_bo_unmap()
> which is
> > +        * invoked before destroying userq. So if the user wants to
> reference/dereference
> > +        * the userq vm bo, then ensure the userq vm bo is dereferenced before
> amdgpu_vm_bo_unmap()
> > +        * or unmap the userq bo VA at amdgpu_userq_destroy() directly; otherwise,
> that results in the
> > +        * userq va BOs being referred to unbalanced and causing the failure of
> amdgpu_vm_bo_unmap().
> > +        */
>
> I don't follow this comment.  Why wouldn't this be balanced?
If we take a reference on the userq VM BOs with amdgpu_bo_ref(mapping->bo_va->base.bo) in the userq creation IOCTL,
that reference (and its usage count) is held until amdgpu_userq_destroy(). However, the userq VA is unmapped by
amdgpu_vm_bo_unmap(), which runs before amdgpu_userq_destroy(). So when amdgpu_vm_bo_unmap() tries to unmap the
userq VA mapping, it fails because the BO still holds the extra reference, leaving the refcount unbalanced.

> Alex
>
> > +       /*amdgpu_bo_unref(&mapping->bo_va->base.bo);*/
> > +
> > +       mapping->bo_va->queue_refcount--;
> > +
> > +       amdgpu_bo_unreserve(vm->root.bo);
> > +       return 0;
> > +
> > +out_err:
> > +       amdgpu_bo_unreserve(vm->root.bo);
> > +       return -EINVAL;
> > +}
> > +
> > +int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
> > +                       struct amdgpu_usermode_queue *queue) {
> > +       amdgpu_userq_buffer_va_put(vm, queue->queue_va);
> > +       amdgpu_userq_buffer_va_put(vm, queue->rptr_va);
> > +       amdgpu_userq_buffer_va_put(vm, queue->wptr_va);
> > +
> > +       return 0;
> > +}
> > +
> >  static int
> >  amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
> >                           struct amdgpu_usermode_queue *queue) @@
> > -442,6 +552,9 @@ amdgpu_userq_create(struct drm_file *filp, union
> drm_amdgpu_userq *args)
> >         queue->queue_type = args->in.ip_type;
> >         queue->vm = &fpriv->vm;
> >         queue->priority = priority;
> > +       queue->queue_va = args->in.queue_va;
> > +       queue->rptr_va = args->in.rptr_va;
> > +       queue->wptr_va = args->in.wptr_va;
> >
> >         db_info.queue_type = queue->queue_type;
> >         db_info.doorbell_handle = queue->doorbell_handle; @@ -472,7
> > +585,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq
> *args)
> >                 goto unlock;
> >         }
> >
> > -
> >         qid = idr_alloc(&uq_mgr->userq_idr, queue, 1,
> AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
> >         if (qid < 0) {
> >                 drm_file_err(uq_mgr->file, "Failed to allocate a queue
> > id\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> > index 704935ca0c36..9543fbab7cc3 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> > @@ -52,6 +52,10 @@ struct amdgpu_usermode_queue {
> >         enum amdgpu_userq_state state;
> >         uint64_t                doorbell_handle;
> >         uint64_t                doorbell_index;
> > +       uint64_t                queue_va;
> > +       uint64_t                rptr_va;
> > +       uint64_t                wptr_va;
> > +
> >         uint64_t                flags;
> >         struct amdgpu_mqd_prop  *userq_prop;
> >         struct amdgpu_userq_mgr *userq_mgr; @@ -134,4 +138,11 @@ int
> > amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device
> > *adev,
> >
> >  int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
> >                         u64 expected_size);
> > +int amdgpu_userq_buffer_va_get(struct amdgpu_vm *vm, u64 addr); bool
> > +amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr); bool
> > +amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
> > +                       struct amdgpu_usermode_queue *queue); int
> > +amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr); int
> > +amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
> > +                       struct amdgpu_usermode_queue *queue);
> >  #endif
> > --
> > 2.34.1
> >


More information about the amd-gfx mailing list