[PATCH v2] drm/amdgpu: Add queue id support to the user queue wait IOCTL

Marek Olšák maraeo at gmail.com
Mon Apr 14 17:33:16 UTC 2025


On Mon, Apr 14, 2025 at 7:33 AM Christian König <christian.koenig at amd.com>
wrote:

> Am 12.04.25 um 10:03 schrieb Arunpravin Paneer Selvam:
> > Add queue id support to the user queue wait IOCTL
> > drm_amdgpu_userq_wait structure.
> >
> > This is required to retrieve the wait user queue and maintain
> > the fence driver references in it so that the user queue in
> > the same context releases their reference to the fence drivers
> > at some point before queue destruction.
> >
> > Otherwise, we would gather those references until we
> > don't have any more space left and crash.
> >
> > v2: Modify the UAPI comment as per the mesa and libdrm UAPI comment.
> >
> > Libdrm MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/408
> > Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34493
> >
> > Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
> > Suggested-by: Christian König <christian.koenig at amd.com>
>
> Reviewed-by: Christian König <christian.koenig at amd.com>
>

Didn't you notice the ABI breakage?

Marek


>
> > ---
> >  .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.c   | 20 +++++++++++--------
> >  .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.h   |  1 -
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c |  1 -
> >  include/uapi/drm/amdgpu_drm.h                 |  6 ++++++
> >  4 files changed, 18 insertions(+), 10 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> > index a4953d668972..83bb2737c92e 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> > @@ -97,7 +97,6 @@ int amdgpu_userq_fence_driver_alloc(struct
> amdgpu_device *adev,
> >       spin_lock_init(&fence_drv->fence_list_lock);
> >
> >       fence_drv->adev = adev;
> > -     fence_drv->fence_drv_xa_ptr = &userq->fence_drv_xa;
> >       fence_drv->context = dma_fence_context_alloc(1);
> >       get_task_comm(fence_drv->timeline_name, current);
> >
> > @@ -591,6 +590,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev,
> void *data,
> >       u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
> >       struct drm_amdgpu_userq_fence_info *fence_info = NULL;
> >       struct drm_amdgpu_userq_wait *wait_info = data;
> > +     struct amdgpu_fpriv *fpriv = filp->driver_priv;
> > +     struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
> > +     struct amdgpu_usermode_queue *waitq;
> >       struct drm_gem_object **gobj_write;
> >       struct drm_gem_object **gobj_read;
> >       struct dma_fence **fences = NULL;
> > @@ -840,6 +842,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev,
> void *data,
> >                       fences[num_fences++] = fence;
> >               }
> >
> > +             waitq = idr_find(&userq_mgr->userq_idr,
> wait_info->waitq_id);
> > +             if (!waitq)
> > +                     goto free_fences;
> > +
> >               for (i = 0, cnt = 0; i < num_fences; i++) {
> >                       struct amdgpu_userq_fence_driver *fence_drv;
> >                       struct amdgpu_userq_fence *userq_fence;
> > @@ -868,14 +874,12 @@ int amdgpu_userq_wait_ioctl(struct drm_device
> *dev, void *data,
> >                        * Otherwise, we would gather those references
> until we don't
> >                        * have any more space left and crash.
> >                        */
> > -                     if (fence_drv->fence_drv_xa_ptr) {
> > -                             r = xa_alloc(fence_drv->fence_drv_xa_ptr,
> &index, fence_drv,
> > -                                          xa_limit_32b, GFP_KERNEL);
> > -                             if (r)
> > -                                     goto free_fences;
> > +                     r = xa_alloc(&waitq->fence_drv_xa, &index,
> fence_drv,
> > +                                  xa_limit_32b, GFP_KERNEL);
> > +                     if (r)
> > +                             goto free_fences;
> >
> > -                             amdgpu_userq_fence_driver_get(fence_drv);
> > -                     }
> > +                     amdgpu_userq_fence_driver_get(fence_drv);
> >
> >                       /* Store drm syncobj's gpu va address and value */
> >                       fence_info[cnt].va = fence_drv->va;
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> > index f0a91cc02880..d5090a6bcdde 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> > @@ -55,7 +55,6 @@ struct amdgpu_userq_fence_driver {
> >       spinlock_t fence_list_lock;
> >       struct list_head fences;
> >       struct amdgpu_device *adev;
> > -     struct xarray *fence_drv_xa_ptr;
> >       char timeline_name[TASK_COMM_LEN];
> >  };
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> > index ecd49cf15b2a..7c754ba56cff 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> > @@ -73,7 +73,6 @@ amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr
> *uq_mgr,
> >       }
> >
> >       uq_funcs->mqd_destroy(uq_mgr, queue);
> > -     queue->fence_drv->fence_drv_xa_ptr = NULL;
> >       amdgpu_userq_fence_driver_free(queue);
> >       idr_remove(&uq_mgr->userq_idr, queue_id);
> >       kfree(queue);
> > diff --git a/include/uapi/drm/amdgpu_drm.h
> b/include/uapi/drm/amdgpu_drm.h
> > index ef97c0d78b8a..2195810ae42d 100644
> > --- a/include/uapi/drm/amdgpu_drm.h
> > +++ b/include/uapi/drm/amdgpu_drm.h
> > @@ -501,6 +501,12 @@ struct drm_amdgpu_userq_fence_info {
> >  };
> >
> >  struct drm_amdgpu_userq_wait {
> > +     /**
> > +      * @waitq_id: Queue handle used by the userq wait IOCTL to
> retrieve the
> > +      * wait queue and maintain the fence driver references in it.
> > +      */
> > +     __u32   waitq_id;
> > +     __u32   pad;
> >       /**
> >        * @syncobj_handles: The list of syncobj handles submitted by the
> user queue
> >        * job to get the va/value pairs.
>
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.freedesktop.org/archives/amd-gfx/attachments/20250414/09d16c7d/attachment.htm>


More information about the amd-gfx mailing list