[PATCH v3 5/6] drm/amdgpu: Implement userqueue signal/wait IOCTL functions

Christian König christian.koenig at amd.com
Fri Apr 21 13:20:57 UTC 2023


Am 20.04.23 um 16:47 schrieb Arunpravin Paneer Selvam:
> This patch introduces new IOCTLs for the userqueue secure semaphore.
>
> The signal IOCTL, called from a userspace application, gets a drm
> syncobj and an array of GEM BO handles passed in as parameters, and
> the driver installs the fence into them.
>
> The wait IOCTL gets an array of drm syncobjs, finds the fences
> attached to the drm syncobjs and obtains the array of
> memory_address/fence_value combinations which are returned to
> userspace.
>
> v2: Addressed the following review comments from Christian:
>
>      - Install the fence into the GEM BO object.
>      - Lock all BOs using the dma-resv subsystem.
>      - Reorder the sequence in the signal IOCTL function.
>      - Get the write pointer from the shadow wptr.
>      - Use userq_fence to fetch the va/value in the wait IOCTL.
>
> v3: Addressed review comments from Christian:
>      - Use the drm_exec helper for proper BO reservation and to
>        avoid BO lock/unlock issues.
>      - Add fence/fence driver reference count logic for the
>        signal/wait IOCTLs.
>
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h           |   2 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |   2 +
>   .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.c   | 312 +++++++++++++++++-
>   .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.h   |   6 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c |   1 +
>   5 files changed, 315 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 3bc8a2d35bb3..1d8a762f43c6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -970,6 +970,8 @@ struct amdgpu_device {
>   	struct amdgpu_mes               mes;
>   	struct amdgpu_mqd               mqds[AMDGPU_HW_IP_NUM];
>   
> +	struct amdgpu_userq_mgr         *userq_mgr;
> +
>   	/* df */
>   	struct amdgpu_df                df;
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index e9c5047087d0..b59e31845568 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -2751,6 +2751,8 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
>   	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   	DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   };
>   
>   static const struct drm_driver amdgpu_kms_driver = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> index a03d12f83147..1c455b7ebcd6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> @@ -25,6 +25,7 @@
>   #include <linux/kref.h>
>   #include <linux/slab.h>
>   
> +#include <drm/drm_exec.h>
>   #include <drm/drm_syncobj.h>
>   
>   #include "amdgpu.h"

> @@ -84,7 +85,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
>   			     &fence_drv->cpu_addr);
>   	if (r)
>   		return -ENOMEM;
> -	
> +
>   	kref_init(&fence_drv->refcount);
>   	INIT_LIST_HEAD(&fence_drv->fences);
>   	spin_lock_init(&fence_drv->fence_list_lock);
> @@ -124,28 +125,27 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
>   void amdgpu_userq_fence_driver_destroy(struct kref *ref)
>   {
>   	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
> -					 struct amdgpu_userq_fence_driver,
> -					 refcount);
> +					struct amdgpu_userq_fence_driver,
> +					refcount);
>   	struct amdgpu_device *adev = fence_drv->adev;
>   	struct amdgpu_userq_fence *fence, *tmp;
>   	struct dma_fence *f;
> -	
> +
>   	spin_lock(&fence_drv->fence_list_lock);
>   	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
>   		f = &fence->base;
> -		
> +
>   		if (!dma_fence_is_signaled(f)) {
>   			dma_fence_set_error(f, -ECANCELED);
>   			dma_fence_signal(f);
>   		}
> -		
> +
>   		list_del(&fence->link);
>   		dma_fence_put(f);
>   	}
> -	
> +
>   	WARN_ON_ONCE(!list_empty(&fence_drv->fences));
>   	spin_unlock(&fence_drv->fence_list_lock);
> -	
>   	/* Free seq64 memory */
>   	amdgpu_seq64_free(adev, fence_drv->gpu_addr);
>   	kfree(fence_drv);

This whole block here is just unrelated whitespace changes. Please
clean this up.

> @@ -161,6 +161,11 @@ void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
>   	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
>   }
>   
> +static inline bool amdgpu_userq_fence_test_signaled(struct dma_fence *fence)
> +{
> +	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
> +}
> +

Oh, that's messing with dma_fence internals, and you need a very good
explanation of why this is needed!
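
If the intent here is just a signaled check, the public interface
already covers it, e.g. (untested sketch):

	if (dma_fence_is_signaled(fence))
		goto signaled;

Drivers shouldn't need to poke at DMA_FENCE_FLAG_SIGNALED_BIT
directly for that.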

>   int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
>   			      u64 seq, struct dma_fence **f)
>   {
> @@ -251,3 +256,294 @@ static const struct dma_fence_ops amdgpu_userq_fence_ops = {
>   	.signaled = amdgpu_userq_fence_signaled,
>   	.release = amdgpu_userq_fence_release,
>   };
> +
> +static int amdgpu_userq_fence_read_wptr(struct drm_file *filp,
> +					struct amdgpu_usermode_queue *queue,
> +					u64 *wptr)
> +{
> +	struct amdgpu_fpriv *fpriv = filp->driver_priv;
> +	struct amdgpu_bo_va_mapping *mapping;
> +	struct amdgpu_vm *vm = &fpriv->vm;
> +	struct amdgpu_bo *bo;
> +	u64 addr, *ptr;
> +	int r;
> +
> +	addr = queue->userq_prop.wptr_gpu_addr >> PAGE_SHIFT;
> +
> +	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
> +	if (!mapping)
> +		return -EINVAL;
> +
> +	bo = mapping->bo_va->base.bo;
> +	r = amdgpu_bo_reserve(bo, true);
> +	if (r) {
> +		DRM_ERROR("Failed to reserve userqueue wptr bo");
> +		return r;
> +	}
> +
> +	r = amdgpu_bo_kmap(bo, (void **)&ptr);
> +	if (r) {
> +		DRM_ERROR("Failed mapping the userqueue wptr bo");
> +		goto map_error;
> +	}
> +
> +	*wptr = le64_to_cpu(*ptr);
> +
> +	amdgpu_bo_kunmap(bo);
> +	amdgpu_bo_unreserve(bo);
> +
> +	return 0;
> +
> +map_error:
> +	amdgpu_bo_unreserve(bo);
> +	return r;
> +}
> +
> +int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
> +			      struct drm_file *filp)
> +{
> +	struct drm_amdgpu_userq_signal *args = data;
> +	struct amdgpu_device *adev = drm_to_adev(dev);
> +	struct amdgpu_userq_mgr *userq_mgr = adev->userq_mgr;
> +	struct amdgpu_usermode_queue *queue;
> +	struct drm_gem_object **gobj = NULL;
> +	struct drm_syncobj *syncobj = NULL;
> +	u32 *bo_handles, num_bo_handles;
> +	struct dma_fence *fence;
> +	struct drm_exec exec;
> +	u64 wptr;
> +	int r, i;
> +
> +	/* Retrieve the user queue */
> +	queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
> +	if (!queue)
> +		return -ENOENT;
> +
> +	r = amdgpu_userq_fence_read_wptr(filp, queue, &wptr);
> +	if (r)
> +		return -EINVAL;
> +
> +	/* Find Syncobj if any */
> +	syncobj = drm_syncobj_find(filp, args->syncobj_handle);
> +
> +	/* Array of bo handles */
> +	num_bo_handles = args->num_bo_handles;
> +	bo_handles = kmalloc_array(num_bo_handles, sizeof(*bo_handles), GFP_KERNEL);
> +	if (!bo_handles)
> +		return -ENOMEM;
> +
> +	if (copy_from_user(bo_handles, u64_to_user_ptr(args->bo_handles_array),
> +			   sizeof(u32) * num_bo_handles)) {
> +		r = -EFAULT;
> +		goto cleanup_bo_handles;
> +	}
> +
> +	/* Create a new fence */
> +	r = amdgpu_userq_fence_create(queue, wptr, &fence);
> +	if (!fence)
> +		goto cleanup_bo_handles;
> +
> +	if (amdgpu_userq_fence_test_signaled(fence))
> +		goto signaled;
> +
> +	/* Add the created fence to syncobj/BO's */
> +	if (syncobj) {
> +		drm_syncobj_replace_fence(syncobj, fence);
> +		dma_fence_put(fence);
> +	}
> +
> +	/* Array of GEM object handles */
> +	gobj = kmalloc_array(num_bo_handles, sizeof(*gobj), GFP_KERNEL);
> +	if (!gobj) {
> +		r = -ENOMEM;
> +		goto cleanup_bo_handles;
> +	}
> +
> +	drm_exec_init(&exec, true);
> +	drm_exec_while_not_all_locked(&exec) {
> +		for (i = 0; i < num_bo_handles; i++) {
> +			/* Retrieve GEM object */
> +			gobj[i] = drm_gem_object_lookup(filp, bo_handles[i]);

Looks like a step in the right direction; the drm_exec usage looks
correct, for example.

But it's illegal to look up an object when you are already holding
the locks of another object.

What you need to do is this (a rough sketch follows below):

1. Look up all BOs (using drm_gem_object_lookup).
2. Lock all BOs (using drm_exec).
3. Add the fence to the syncobj and BOs.
4. Unlock all BOs.
5. Drop the references grabbed during lookup.
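
Completely untested sketch of that ordering, reusing the drm_exec
calls from your patch (labels and error handling are illustrative
only):

	/* 1. Look up all BOs first, without holding any lock */
	for (i = 0; i < num_bo_handles; i++) {
		gobj[i] = drm_gem_object_lookup(filp, bo_handles[i]);
		if (!gobj[i]) {
			r = -ENOENT;
			goto put_gobj;
		}
	}

	/* 2. Lock all BOs */
	drm_exec_init(&exec, true);
	drm_exec_while_not_all_locked(&exec) {
		for (i = 0; i < num_bo_handles; i++) {
			r = drm_exec_prepare_obj(&exec, gobj[i], 1);
			drm_exec_continue_on_contention(&exec);
			if (unlikely(r))
				goto exec_fini;
		}
	}

	/* 3. Only now add the fence to the syncobj and the BOs */
	if (syncobj)
		drm_syncobj_replace_fence(syncobj, fence);

	for (i = 0; i < num_bo_handles; i++)
		dma_resv_add_fence(gobj[i]->resv, fence,
				   args->bo_flags & AMDGPU_USERQ_BO_READ ?
				   DMA_RESV_USAGE_READ :
				   DMA_RESV_USAGE_WRITE);

	/* 4. Unlock all BOs */
exec_fini:
	drm_exec_fini(&exec);

	/* 5. Drop the references grabbed during lookup */
	i = num_bo_handles;
put_gobj:
	while (i-- > 0)
		drm_gem_object_put(gobj[i]);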




> +			if (!gobj[i]) {
> +				r = -ENOENT;
> +				goto exec_fini;
> +			}
> +
> +			r = drm_exec_prepare_obj(&exec, gobj[i], 1);
> +			drm_exec_continue_on_contention(&exec);
> +			if (unlikely(r))
> +				goto exec_fini;
> +
> +			dma_resv_add_fence(gobj[i]->resv, fence,
> +					   args->bo_flags & AMDGPU_USERQ_BO_READ ?
> +					   DMA_RESV_USAGE_READ :
> +					   DMA_RESV_USAGE_WRITE);
> +		}
> +	}
> +
> +exec_fini:
> +	drm_exec_fini(&exec);
> +signaled:
> +	dma_fence_put(fence);
> +	kfree(gobj);
> +cleanup_bo_handles:
> +	kfree(bo_handles);
> +
> +	return r;
> +}
> +
> +int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
> +			    struct drm_file *filp)
> +{
> +	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
> +	struct drm_amdgpu_userq_wait *wait_info = data;
> +	u32 *syncobj_handles, *bo_handles;
> +	struct dma_resv_iter resv_cursor;
> +	u32 num_syncobj, num_bo_handles;
> +	struct drm_gem_object **gobj;
> +	u64 num_fences = 0;
> +	int i, j, r, cur;
> +	bool bo_flag;
> +
> +	num_bo_handles = wait_info->num_bo_handles;
> +	/* Array of GEM BO handles */
> +	bo_handles = kmalloc_array(num_bo_handles, sizeof(*bo_handles), GFP_KERNEL);
> +	if (!bo_handles)
> +		return -ENOMEM;
> +
> +	if (copy_from_user(bo_handles, u64_to_user_ptr(wait_info->bo_handles_array),
> +			   sizeof(u32) * num_bo_handles)) {
> +		r = -EFAULT;
> +		goto err_free_bo_handles;
> +	}
> +
> +	/* Array of GEM object handles */
> +	gobj = kmalloc_array(num_bo_handles, sizeof(*gobj), GFP_KERNEL);
> +	if (!gobj) {
> +		r = -ENOMEM;
> +		goto err_free_fence_info;
> +	}
> +
> +	if (wait_info->num_fences) {
> +		/* Array of fence gpu address */
> +		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
> +		if (!fence_info) {
> +			r = -ENOMEM;
> +			goto err_free_bo_handles;
> +		}
> +	}
> +
> +	/* Track fence_info index value */
> +	cur = num_fences;
> +
> +	/* Retrieve GEM objects's fence */
> +	bo_flag = wait_info->bo_wait_flags & AMDGPU_USERQ_BO_READ;
> +	for (i = 0; i < num_bo_handles; i++) {
> +		struct dma_fence *bo_fence;
> +
> +		gobj[i] = drm_gem_object_lookup(filp, bo_handles[i]);
> +		if (!gobj[i]) {
> +			r = -ENOENT;
> +			goto err_put_gobj;
> +		}
> +
> +		dma_resv_for_each_fence(&resv_cursor, gobj[i]->resv,
> +					bo_flag ?
> +					DMA_RESV_USAGE_READ :
> +					DMA_RESV_USAGE_WRITE,
> +					bo_fence) {

When you don't lock the BO (and that's ok) you need to use
dma_resv_for_each_fence_unlocked()!

This also requires dropping the fences gathered so far and starting
over when dma_resv_iter_is_restarted() returns true.
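
Roughly like this (untested, following the pattern used in
dma_resv_get_fences()):

	struct dma_resv_iter cursor;
	struct dma_fence *f;

	dma_resv_iter_begin(&cursor, gobj[i]->resv,
			    bo_flag ? DMA_RESV_USAGE_READ :
			    DMA_RESV_USAGE_WRITE);
	dma_resv_for_each_fence_unlocked(&cursor, f) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			/* The reservation changed under us; drop
			 * everything collected so far and start over.
			 */
			num_fences = 0;
		}
		/* count or record f here */
	}
	dma_resv_iter_end(&cursor);

(Resetting num_fences like this only works per BO; the real code
needs to remember where the current BO's fences started.)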

> +			if (!wait_info->num_fences) {
> +				++num_fences;
> +				dma_fence_get(bo_fence);
> +				continue;
> +			}
> +
> +			/* Store drm syncobj's gpu va address and value */
> +			fence_info[num_fences - cur].va =
> +				to_amdgpu_userq_fence(bo_fence)->fence_drv->gpu_addr;
> +			fence_info[num_fences - cur].value = bo_fence->seqno;

You need to double-check that this really is an amdgpu_userq_fence;
otherwise the cast will just return garbage here.
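
The usual pattern is to let the cast helper verify the ops before
doing the container_of(), e.g. (sketch, assuming to_amdgpu_userq_fence
is currently an unchecked cast):

	static struct amdgpu_userq_fence *
	to_amdgpu_userq_fence(struct dma_fence *f)
	{
		if (!f || f->ops != &amdgpu_userq_fence_ops)
			return NULL;

		return container_of(f, struct amdgpu_userq_fence, base);
	}

and then skip every fence for which this returns NULL.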

> +
> +			cur--;

Huh? Filling the array from the end won't work. You can't predict how
many fences you will have when you start.

So you need some handling which prevents adding too many fences and
overwriting things in userspace.
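
E.g. fill the array forward and bail out before writing past the
buffer userspace allocated; untested sketch, the err_out label is
illustrative:

	if (wait_info->num_fences) {
		if (num_fences >= wait_info->num_fences) {
			/* More fences than userspace expected */
			r = -EINVAL;
			goto err_out;
		}
		fence_info[num_fences].va =
			to_amdgpu_userq_fence(bo_fence)->fence_drv->gpu_addr;
		fence_info[num_fences].value = bo_fence->seqno;
	}
	num_fences++;

That also makes the backwards "cur" bookkeeping unnecessary.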

Christian.

> +			dma_fence_put(bo_fence);
> +		}
> +
> +		drm_gem_object_put(gobj[i]);
> +	}
> +
> +	num_syncobj = wait_info->num_syncobj_handles;
> +	/* Array of Syncobj handles */
> +	syncobj_handles = kmalloc_array(num_syncobj, sizeof(*syncobj_handles), GFP_KERNEL);
> +	if (!syncobj_handles)
> +		goto err_put_gobj;
> +
> +	if (copy_from_user(syncobj_handles, u64_to_user_ptr(wait_info->syncobj_handles_array),
> +				sizeof(u32) * num_syncobj)) {
> +		r = -EFAULT;
> +		goto err_free_syncobj_handles;
> +	}
> +
> +	/* Retrieve syncobj's fence */
> +	for (j = 0; j < num_syncobj; j++) {
> +		struct dma_fence *fence = NULL;
> +		struct drm_syncobj *syncobj;
> +
> +		syncobj = drm_syncobj_find(filp, syncobj_handles[j]);
> +		fence = drm_syncobj_fence_get(syncobj);
> +		if (!fence)
> +			continue;
> +
> +		if (!wait_info->num_fences) {
> +			++num_fences;
> +			continue;
> +		}
> +
> +		/* Store drm syncobj's gpu va address and value */
> +		fence_info[num_fences - cur].va = to_amdgpu_userq_fence(fence)->fence_drv->gpu_addr;
> +		fence_info[num_fences - cur].value = fence->seqno;
> +		dma_fence_put(fence);
> +
> +		cur--;
> +		dma_fence_put(fence);
> +	}
> +
> +	/*
> +	 * Passing num_fences = 0 means that userspace doesn't want to
> +	 * retrieve userq_fence_info. If num_fences = 0 we skip filling
> +	 * userq_fence_info and return the actual number of fences on
> +	 * wait_info->num_fences.
> +	 */
> +
> +	if (!wait_info->num_fences)
> +		goto no_fences;
> +
> +	if (copy_to_user(u64_to_user_ptr(wait_info->userq_fence_info),
> +			 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
> +		r = -EFAULT;
> +		goto err_free_gobj;
> +	}
> +
> +	/* Free all handles */
> +	kfree(syncobj_handles);
> +	kfree(gobj);
> +	kfree(fence_info);
> +	kfree(bo_handles);
> +
> +	return 0;
> +
> +err_free_syncobj_handles:
> +	kfree(syncobj_handles);
> +err_put_gobj:
> +	while (i-- > 0)
> +		drm_gem_object_put(gobj[i]);
> +no_fences:
> +	if (!wait_info->num_fences)
> +		wait_info->num_fences = num_fences;
> +err_free_gobj:
> +	kfree(gobj);
> +err_free_fence_info:
> +	kfree(fence_info);
> +err_free_bo_handles:
> +	kfree(bo_handles);
> +
> +	return r;
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> index 7329b4e5dd30..2b2f52296d76 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
> @@ -29,6 +29,8 @@
>   
>   #include "amdgpu_userqueue.h"
>   
> +#define AMDGPU_USERQ_BO_READ	0x1
> +
>   struct amdgpu_userq_fence {
>   	struct dma_fence base;
>   	/* userq fence lock */
> @@ -60,5 +62,9 @@ void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
>   int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev, struct amdgpu_usermode_queue *userq);
>   void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
>   void amdgpu_userq_fence_driver_destroy(struct kref *ref);
> +int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
> +			      struct drm_file *filp);
> +int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
> +			    struct drm_file *filp);
>   
>   #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> index 8918b176fdcb..7022afc17d8d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> @@ -305,6 +305,7 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_devi
>       mutex_init(&userq_mgr->userq_mutex);
>       idr_init_base(&userq_mgr->userq_idr, 1);
>       userq_mgr->adev = adev;
> +    adev->userq_mgr = userq_mgr;
>   
>       amdgpu_userqueue_setup_ip_funcs(userq_mgr);
>       return 0;


