<div dir="ltr"><div class="gmail_quote gmail_quote_container"><div dir="ltr" class="gmail_attr">On Mon, Apr 14, 2025 at 7:33 AM Christian König <<a href="mailto:christian.koenig@amd.com">christian.koenig@amd.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Am 12.04.25 um 10:03 schrieb Arunpravin Paneer Selvam:<br>
> > Add queue id support to the user queue wait IOCTL
> > drm_amdgpu_userq_wait structure.
> >
> > This is required to retrieve the wait user queue and maintain
> > the fence driver references in it so that the user queue in
> > the same context releases their reference to the fence drivers
> > at some point before queue destruction.
> >
> > Otherwise, we would gather those references until we
> > don't have any more space left and crash.
> >
> > v2: Modify the UAPI comment as per the mesa and libdrm UAPI comment.
> >
> > Libdrm MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/408
> > Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34493
> >
> > Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
> > Suggested-by: Christian König <christian.koenig@amd.com>
>
> Reviewed-by: Christian König <christian.koenig@amd.com>

Didn't you notice the ABI breakage?

Marek
>
> > ---
> >  .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.c   | 20 +++++++++++--------
> >  .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.h   |  1 -
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c |  1 -
> >  include/uapi/drm/amdgpu_drm.h                 |  6 ++++++
> >  4 files changed, 18 insertions(+), 10 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> > index a4953d668972..83bb2737c92e 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> > @@ -97,7 +97,6 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
> >       spin_lock_init(&fence_drv->fence_list_lock);
> >
> >       fence_drv->adev = adev;
> > -     fence_drv->fence_drv_xa_ptr = &userq->fence_drv_xa;
> >       fence_drv->context = dma_fence_context_alloc(1);
> >       get_task_comm(fence_drv->timeline_name, current);
> >
> > @@ -591,6 +590,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
> >       u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
> >       struct drm_amdgpu_userq_fence_info *fence_info = NULL;
> >       struct drm_amdgpu_userq_wait *wait_info = data;
> > +     struct amdgpu_fpriv *fpriv = filp->driver_priv;
> > +     struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
> > +     struct amdgpu_usermode_queue *waitq;
> >       struct drm_gem_object **gobj_write;
> >       struct drm_gem_object **gobj_read;
> >       struct dma_fence **fences = NULL;
> > @@ -840,6 +842,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
> >                       fences[num_fences++] = fence;
> >               }
> >
> > +             waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
> > +             if (!waitq)
> > +                     goto free_fences;
> > +
> >               for (i = 0, cnt = 0; i < num_fences; i++) {
> >                       struct amdgpu_userq_fence_driver *fence_drv;
> >                       struct amdgpu_userq_fence *userq_fence;
> > @@ -868,14 +874,12 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
> >                        * Otherwise, we would gather those references until we don't
> >                        * have any more space left and crash.
> >                        */
> > -                     if (fence_drv->fence_drv_xa_ptr) {
> > -                             r = xa_alloc(fence_drv->fence_drv_xa_ptr, &index, fence_drv,
> > -                                          xa_limit_32b, GFP_KERNEL);
> > -                             if (r)
> > -                                     goto free_fences;
> > +                     r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
> > +                                  xa_limit_32b, GFP_KERNEL);
> > +                     if (r)
> > +                             goto free_fences;
> >
> > -                             amdgpu_userq_fence_driver_get(fence_drv);
> > -                     }
> > +                     amdgpu_userq_fence_driver_get(fence_drv);
> >
> >                       /* Store drm syncobj's gpu va address and value */
> >                       fence_info[cnt].va = fence_drv->va;
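The other half of the lifecycle described in the commit message is not visible in this hunk: the queue destruction path presumably walks the per-queue fence_drv_xa and drops every reference taken next to the xa_alloc() above. A rough sketch of that pattern, assuming amdgpu_userq_fence_driver_put() is the counterpart of the _get() in this hunk (the helper name below is made up for illustration, it is not code from this patch):

/* Hypothetical helper, not part of this patch: drop the references the
 * wait IOCTL stored in the queue's fence_drv_xa before the queue is freed. */
static void userq_drop_wait_fence_drv_refs(struct amdgpu_usermode_queue *userq)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long index;

	/* Release the reference taken in amdgpu_userq_wait_ioctl() */
	xa_for_each(&userq->fence_drv_xa, index, fence_drv)
		amdgpu_userq_fence_driver_put(fence_drv);

	xa_destroy(&userq->fence_drv_xa);
}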
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> > index f0a91cc02880..d5090a6bcdde 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> > @@ -55,7 +55,6 @@ struct amdgpu_userq_fence_driver {
> >       spinlock_t fence_list_lock;
> >       struct list_head fences;
> >       struct amdgpu_device *adev;
> > -     struct xarray *fence_drv_xa_ptr;
> >       char timeline_name[TASK_COMM_LEN];
> >  };
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> > index ecd49cf15b2a..7c754ba56cff 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> > @@ -73,7 +73,6 @@ amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
> >       }
> >
> >       uq_funcs->mqd_destroy(uq_mgr, queue);
> > -     queue->fence_drv->fence_drv_xa_ptr = NULL;
> >       amdgpu_userq_fence_driver_free(queue);
> >       idr_remove(&uq_mgr->userq_idr, queue_id);
> >       kfree(queue);
> > diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> > index ef97c0d78b8a..2195810ae42d 100644
> > --- a/include/uapi/drm/amdgpu_drm.h
> > +++ b/include/uapi/drm/amdgpu_drm.h
> > @@ -501,6 +501,12 @@ struct drm_amdgpu_userq_fence_info {
> >  };
> >
> >  struct drm_amdgpu_userq_wait {
> > +     /**
> > +      * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
> > +      * wait queue and maintain the fence driver references in it.
> > +      */
> > +     __u32   waitq_id;
> > +     __u32   pad;
> >       /**
> >        * @syncobj_handles: The list of syncobj handles submitted by the user queue
> >        * job to get the va/value pairs.
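To make the ABI concern above concrete: the two new members are inserted at the very top of struct drm_amdgpu_userq_wait, so every pre-existing member moves down by 8 bytes. Userspace built against the old header would then have its syncobj_handles pointer read as waitq_id/pad by a newer kernel, assuming this interface has already shipped to userspace. Rough sketch of the resulting layout (member list abridged; syncobj_handles assumed to be a __u64, as is usual for UAPI pointer fields):

#include <linux/types.h>

/* Illustration only, not the real header: layout after this patch. */
struct drm_amdgpu_userq_wait_sketch {
	__u32	waitq_id;		/* offset 0, new */
	__u32	pad;			/* offset 4, new */
	__u64	syncobj_handles;	/* offset 8, was offset 0 before the patch */
	/* ... all remaining pre-existing members, each shifted by 8 bytes ... */
};

/* A layout-compatible alternative would be to append the queue id at the end
 * of the struct (or reuse existing padding) rather than prepending it. */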
>
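For completeness, the userspace side of the new field would look roughly like this. Sketch only: the DRM_AMDGPU_USERQ_WAIT command name, the include paths and the helper below are assumptions based on the libdrm/mesa MRs linked in the commit message, and every member other than waitq_id is filled exactly as it was before this patch.

#include <string.h>
#include <xf86drm.h>      /* drmCommandWriteRead() from libdrm */
#include <amdgpu_drm.h>   /* struct drm_amdgpu_userq_wait */

/* Hypothetical helper: issue the user queue wait IOCTL for a given queue. */
static int userq_wait_sketch(int drm_fd, __u32 queue_id)
{
	struct drm_amdgpu_userq_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.waitq_id = queue_id;	/* new field added by this patch */

	/* The syncobj/BO handle arrays, their counts and the fence info
	 * output buffer are filled in here exactly as before this change. */

	return drmCommandWriteRead(drm_fd, DRM_AMDGPU_USERQ_WAIT,
				   &wait, sizeof(wait));
}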