[Intel-gfx] [PATCH v6 20/20] drm/i915/vm_bind: Async vm_unbind support

Niranjana Vishwanathapura niranjana.vishwanathapura at intel.com
Tue Nov 8 15:46:41 UTC 2022


On Mon, Nov 07, 2022 at 05:39:34PM -0800, Zanoni, Paulo R wrote:
>On Mon, 2022-11-07 at 00:52 -0800, Niranjana Vishwanathapura wrote:
>> Asynchronously unbind the vma upon vm_unbind call.
>> Fall back to synchronous unbind if the backend doesn't support
>> async unbind or if the async unbind fails.
>>
>> There is no need for a vm_unbind out-fence, as i915 handles all
>> sequencing internally; user space need not sequence any operation
>> against the unbind completion.
>
>Can you please provide some more details on how this works from the
>user space point of view? I want to be able to know with 100% certainty
>if an unbind has already happened, so I can reuse that vma or whatever
>else I may decide to do. I see the interface does not provide any sort
>of drm_syncobjs for me to wait on the async unbind. So, when does the
>unbind really happen? When can I be sure it has completed, so I can do
>stuff with it? Why would you provide an async ioctl and yet no means for
>user space to wait on it?
>

Paulo,
The asynchronous unbind here is not visible to user space. From the user
space point of view, it behaves as if synchronous: the assigned virtual
address can be reused immediately after the vm_unbind ioctl returns. The
i915 driver ensures that the unbind completes before any rebind at that
virtual address. So, unless there is a user programming error where the
GPU keeps accessing the buffer after the vm_unbind, it is fine.
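
For illustration, here is a rough user-space sketch of that flow, assuming
the drm_i915_gem_vm_bind/drm_i915_gem_vm_unbind uAPI proposed earlier in
this series; the ioctl and field names below are illustrative and may not
match the final uAPI exactly:

  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>	/* assumed to carry the vm_bind uAPI from this series */

  /* Unbind a VA range and immediately rebind a new object at the same VA. */
  static int rebind_at_same_va(int drm_fd, __u32 vm_id, __u64 va,
			       __u64 length, __u32 new_handle)
  {
	struct drm_i915_gem_vm_unbind unbind = {};
	struct drm_i915_gem_vm_bind bind = {};
	int ret;

	unbind.vm_id = vm_id;
	unbind.start = va;
	unbind.length = length;

	/* The unbind proceeds asynchronously inside i915 ... */
	ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
	if (ret)
		return ret;

	/*
	 * ... but user space may reuse the VA right away; i915 guarantees
	 * the old unbind completes before the new binding takes effect.
	 */
	bind.vm_id = vm_id;
	bind.handle = new_handle;
	bind.start = va;
	bind.offset = 0;
	bind.length = length;

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
  }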

Regards,
Niranjana

>Thanks,
>Paulo
>
>>
>> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>> ---
>>  drivers/gpu/drm/i915/i915_vma.c | 51 ++++++++++++++++++++++++++++++---
>>  drivers/gpu/drm/i915/i915_vma.h |  1 +
>>  2 files changed, 48 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
>> index 08218e3a2f12..03c966fad87b 100644
>> --- a/drivers/gpu/drm/i915/i915_vma.c
>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>> @@ -42,6 +42,8 @@
>>  #include "i915_vma.h"
>>  #include "i915_vma_resource.h"
>>  
>> +static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma);
>> +
>>  static inline void assert_vma_held_evict(const struct i915_vma *vma)
>>  {
>>  	/*
>> @@ -1711,7 +1713,7 @@ void i915_vma_reopen(struct i915_vma *vma)
>>  	spin_unlock_irq(&gt->closed_lock);
>>  }
>>  
>> -static void force_unbind(struct i915_vma *vma)
>> +static void force_unbind(struct i915_vma *vma, bool async)
>>  {
>>  	if (!drm_mm_node_allocated(&vma->node))
>>  		return;
>> @@ -1725,7 +1727,21 @@ static void force_unbind(struct i915_vma *vma)
>>  		i915_vma_set_purged(vma);
>>  
>>  	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
>> -	WARN_ON(__i915_vma_unbind(vma));
>> +	if (async) {
>> +		struct dma_fence *fence;
>> +
>> +		fence = __i915_vma_unbind_async(vma);
>> +		if (IS_ERR_OR_NULL(fence)) {
>> +			async = false;
>> +		} else {
>> +			dma_resv_add_fence(vma->obj->base.resv, fence,
>> +					   DMA_RESV_USAGE_READ);
>> +			dma_fence_put(fence);
>> +		}
>> +	}
>> +
>> +	if (!async)
>> +		WARN_ON(__i915_vma_unbind(vma));
>>  	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
>>  }
>>  
>> @@ -1785,7 +1801,7 @@ void i915_vma_destroy_locked(struct i915_vma *vma)
>>  {
>>  	lockdep_assert_held(&vma->vm->mutex);
>>  
>> -	force_unbind(vma);
>> +	force_unbind(vma, false);
>>  	list_del_init(&vma->vm_link);
>>  	release_references(vma, vma->vm->gt, false);
>>  }
>> @@ -1796,7 +1812,34 @@ void i915_vma_destroy(struct i915_vma *vma)
>>  	bool vm_ddestroy;
>>  
>>  	mutex_lock(&vma->vm->mutex);
>> -	force_unbind(vma);
>> +	force_unbind(vma, false);
>> +	list_del_init(&vma->vm_link);
>> +	vm_ddestroy = vma->vm_ddestroy;
>> +	vma->vm_ddestroy = false;
>> +
>> +	/* vma->vm may be freed when releasing vma->vm->mutex. */
>> +	gt = vma->vm->gt;
>> +	mutex_unlock(&vma->vm->mutex);
>> +	release_references(vma, gt, vm_ddestroy);
>> +}
>> +
>> +void i915_vma_destroy_async(struct i915_vma *vma)
>> +{
>> +	bool vm_ddestroy, async = vma->obj->mm.rsgt;
>> +	struct intel_gt *gt;
>> +
>> +	if (dma_resv_reserve_fences(vma->obj->base.resv, 1))
>> +		async = false;
>> +
>> +	mutex_lock(&vma->vm->mutex);
>> +	/*
>> +	 * Ensure any asynchronous binding is complete while using
>> +	 * async unbind as we will be releasing the vma here.
>> +	 */
>> +	if (async && i915_active_wait(&vma->active))
>> +		async = false;
>> +
>> +	force_unbind(vma, async);
>>  	list_del_init(&vma->vm_link);
>>  	vm_ddestroy = vma->vm_ddestroy;
>>  	vma->vm_ddestroy = false;
>> diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
>> index 737ef310d046..25f15965dab8 100644
>> --- a/drivers/gpu/drm/i915/i915_vma.h
>> +++ b/drivers/gpu/drm/i915/i915_vma.h
>> @@ -272,6 +272,7 @@ void i915_vma_reopen(struct i915_vma *vma);
>>  
>>  void i915_vma_destroy_locked(struct i915_vma *vma);
>>  void i915_vma_destroy(struct i915_vma *vma);
>> +void i915_vma_destroy_async(struct i915_vma *vma);
>>  
>>  #define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)
>>  
>

