[Intel-gfx] [RFC PATCH v3 07/17] drm/i915/vm_bind: Handle persistent vmas
Jani Nikula
jani.nikula at linux.intel.com
Mon Sep 12 13:16:06 UTC 2022
On Sat, 27 Aug 2022, Andi Shyti <andi.shyti at linux.intel.com> wrote:
> From: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>
> Treat VM_BIND vmas as persistent across execbuf ioctl calls and handle
> them during request submission in the execbuf path.
>
> Support eviction by maintaining a list of evicted persistent vmas
> for rebinding during the next submission.
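For context, a rough sketch of how I'd expect the execbuf path to drain this
list before submission (the helper name and the exact pin interface are my
assumptions, not taken from this series):

	static int i915_vm_rebind_evicted(struct i915_address_space *vm)
	{
		struct i915_vma *vma;
		int err;

		spin_lock(&vm->vm_rebind_lock);
		while (!list_empty(&vm->vm_rebind_list)) {
			vma = list_first_entry(&vm->vm_rebind_list,
					       struct i915_vma, vm_rebind_link);
			list_del_init(&vma->vm_rebind_link);
			spin_unlock(&vm->vm_rebind_lock);

			/* Re-bind the persistent vma at its original VA */
			err = i915_vma_pin(vma, 0, 0, vma->start |
					   PIN_OFFSET_FIXED | PIN_USER);
			if (err)
				return err;
			i915_vma_unpin(vma);

			spin_lock(&vm->vm_rebind_lock);
		}
		spin_unlock(&vm->vm_rebind_lock);

		return 0;
	}

The pop-one-entry-at-a-time loop is only there to avoid pinning (which may
sleep) under the spinlock; error handling is indicative.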
>
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
> Signed-off-by: Ramalingam C <ramalingam.c at intel.com>
> Signed-off-by: Andi Shyti <andi.shyti at linux.intel.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 +
> .../drm/i915/gem/i915_gem_vm_bind_object.c | 8 +++
> drivers/gpu/drm/i915/gt/intel_gtt.c | 2 +
> drivers/gpu/drm/i915/gt/intel_gtt.h | 4 ++
> drivers/gpu/drm/i915/i915_gem_gtt.c | 38 +++++++++++++
> drivers/gpu/drm/i915/i915_gem_gtt.h | 3 +
> drivers/gpu/drm/i915/i915_vma.c | 50 +++++++++++++++--
> drivers/gpu/drm/i915/i915_vma.h | 56 +++++++++++++++----
> drivers/gpu/drm/i915/i915_vma_types.h | 24 ++++++++
> 9 files changed, 169 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 389e9f157ca5e..825dce41f7113 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -38,6 +38,7 @@
> #include "i915_gem_mman.h"
> #include "i915_gem_object.h"
> #include "i915_gem_ttm.h"
> +#include "i915_gem_vm_bind.h"
Why do you add this here if you're not using anything from there?
> #include "i915_memcpy.h"
> #include "i915_trace.h"
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
> index 9ff929f187cfd..3b45529fe8d4c 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
> @@ -91,6 +91,13 @@ void i915_gem_vm_bind_remove(struct i915_vma *vma, bool release_obj)
> {
> lockdep_assert_held(&vma->vm->vm_bind_lock);
>
> + spin_lock(&vma->vm->vm_rebind_lock);
> + if (!list_empty(&vma->vm_rebind_link))
> + list_del_init(&vma->vm_rebind_link);
> + i915_vma_set_purged(vma);
> + i915_vma_set_freed(vma);
> + spin_unlock(&vma->vm->vm_rebind_lock);
> +
> if (!list_empty(&vma->vm_bind_link)) {
> list_del_init(&vma->vm_bind_link);
> list_del_init(&vma->non_priv_vm_bind_link);
> @@ -190,6 +197,7 @@ static struct i915_vma *vm_bind_get_vma(struct i915_address_space *vm,
>
> vma->start = va->start;
> vma->last = va->start + va->length - 1;
> + i915_vma_set_persistent(vma);
>
> return vma;
> }
> diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
> index c4f75826213ae..97cd0089b516d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gtt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
> @@ -296,6 +296,8 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
> INIT_LIST_HEAD(&vm->non_priv_vm_bind_list);
> vm->root_obj = i915_gem_object_create_internal(vm->i915, PAGE_SIZE);
> GEM_BUG_ON(IS_ERR(vm->root_obj));
> + INIT_LIST_HEAD(&vm->vm_rebind_list);
> + spin_lock_init(&vm->vm_rebind_lock);
> }
>
> void *__px_vaddr(struct drm_i915_gem_object *p)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
> index 9a2665e4ec2e5..1f3b1967ec175 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gtt.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
> @@ -265,6 +265,10 @@ struct i915_address_space {
> struct list_head vm_bind_list;
> /** @vm_bound_list: List of vm_binding completed */
> struct list_head vm_bound_list;
> + /** @vm_rebind_list: list of vmas to be rebound */
> + struct list_head vm_rebind_list;
> + /** @vm_rebind_lock: protects vm_rebind_list */
> + spinlock_t vm_rebind_lock;
> /* @va: tree of persistent vmas */
> struct rb_root_cached va;
> struct list_head non_priv_vm_bind_list;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 329ff75b80b97..f083724163deb 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -25,6 +25,44 @@
> #include "i915_trace.h"
> #include "i915_vgpu.h"
>
> +/**
> + * i915_vm_sync() - Wait for all requests on private vmas of a vm to be completed
> + * @vm: address space we need to wait for idle
> + *
> + * Waits until all requests on the VM's private (VM_BIND) objects have completed.
> + *
> + * Returns: 0 on success, -ve error code on failure.
> + */
> +int i915_vm_sync(struct i915_address_space *vm)
> +{
> + int ret;
> +
> + /* Wait for all requests under this vm to finish */
> + ret = dma_resv_wait_timeout(vm->root_obj->base.resv,
> + DMA_RESV_USAGE_BOOKKEEP, false,
> + MAX_SCHEDULE_TIMEOUT);
> + if (ret < 0)
> + return ret;
> + else if (ret > 0)
> + return 0;
> + else
> + return -ETIMEDOUT;
> +}
> +
> +/**
> + * i915_vm_is_active() - Check whether the VM's requests are still active
> + * @vm: targeted address space
> + *
> + * Check whether all requests on the VM's private vmas have completed.
> + *
> + * Returns: True when requests have not yet completed, false otherwise.
> + */
> +bool i915_vm_is_active(const struct i915_address_space *vm)
> +{
> + return !dma_resv_test_signaled(vm->root_obj->base.resv,
> + DMA_RESV_USAGE_BOOKKEEP);
> +}
> +
> int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
> struct sg_table *pages)
> {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index 8c2f57eb5ddaa..a5bbdc59d9dfb 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -51,4 +51,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
>
> #define PIN_OFFSET_MASK I915_GTT_PAGE_MASK
>
> +int i915_vm_sync(struct i915_address_space *vm);
> +bool i915_vm_is_active(const struct i915_address_space *vm);
Maybe I don't get the gem header structure, but why do you add these in
i915_gem_gtt.h but the implementation in i915_vma.c?
In general, declarations for stuff in i915_foo.c should be in
i915_foo.h.
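i.e. something like this (generic i915_foo sketch, purely illustrative):

	/* i915_foo.c */
	bool i915_foo_is_active(const struct i915_foo *foo)
	{
		return READ_ONCE(foo->active);
	}

	/* i915_foo.h */
	bool i915_foo_is_active(const struct i915_foo *foo);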
BR,
Jani.
> +
> #endif
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 239346e0c07f2..0eb7727d62a6f 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -237,6 +237,7 @@ vma_create(struct drm_i915_gem_object *obj,
>
> INIT_LIST_HEAD(&vma->vm_bind_link);
> INIT_LIST_HEAD(&vma->non_priv_vm_bind_link);
> + INIT_LIST_HEAD(&vma->vm_rebind_link);
> return vma;
>
> err_unlock:
> @@ -387,8 +388,31 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
> return err;
> }
>
> -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
> -static int i915_vma_verify_bind_complete(struct i915_vma *vma)
> +/**
> + * i915_vma_sync() - Wait for the vma to be idle
> + * @vma: vma to wait for
> + *
> + * Returns: 0 on success, error code on failure
> + */
> +int i915_vma_sync(struct i915_vma *vma)
> +{
> + int ret;
> +
> + /* Wait for the asynchronous bindings and pending GPU reads */
> + ret = i915_active_wait(&vma->active);
> + if (ret || !i915_vma_is_persistent(vma) || i915_vma_is_purged(vma))
> + return ret;
> +
> + return i915_vm_sync(vma->vm);
> +}
> +
> +/**
> + * i915_vma_verify_bind_complete() - Check whether vm_bind of the vma has completed
> + * @vma: vma submitted for vm_bind
> + *
> + * Returns: 0 if the vm_bind has completed, error code otherwise.
> + */
> +int i915_vma_verify_bind_complete(struct i915_vma *vma)
> {
> struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
> int err;
> @@ -405,9 +429,6 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
>
> return err;
> }
> -#else
> -#define i915_vma_verify_bind_complete(_vma) 0
> -#endif
>
> I915_SELFTEST_EXPORT void
> i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
> @@ -1654,6 +1675,13 @@ static void force_unbind(struct i915_vma *vma)
> if (!drm_mm_node_allocated(&vma->node))
> return;
>
> + /*
> + * Mark the persistent vma as purged to avoid having it wait
> + * on the VM that is being released.
> + */
> + if (i915_vma_is_persistent(vma))
> + i915_vma_set_purged(vma);
> +
> atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
> WARN_ON(__i915_vma_unbind(vma));
> GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
> @@ -1846,6 +1874,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
> int err;
>
> assert_object_held(obj);
> + if (i915_vma_is_persistent(vma))
> + return -EINVAL;
>
> GEM_BUG_ON(!vma->pages);
>
> @@ -2014,6 +2044,16 @@ int __i915_vma_unbind(struct i915_vma *vma)
> __i915_vma_evict(vma, false);
>
> drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
> +
> + if (i915_vma_is_persistent(vma)) {
> + spin_lock(&vma->vm->vm_rebind_lock);
> + if (list_empty(&vma->vm_rebind_link) &&
> + !i915_vma_is_purged(vma))
> + list_add_tail(&vma->vm_rebind_link,
> + &vma->vm->vm_rebind_list);
> + spin_unlock(&vma->vm->vm_rebind_lock);
> + }
> +
> return 0;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
> index 15eac55a3e274..bf0b5b4abd919 100644
> --- a/drivers/gpu/drm/i915/i915_vma.h
> +++ b/drivers/gpu/drm/i915/i915_vma.h
> @@ -47,12 +47,6 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
>
> void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
> #define I915_VMA_RELEASE_MAP BIT(0)
> -
> -static inline bool i915_vma_is_active(const struct i915_vma *vma)
> -{
> - return !i915_active_is_idle(&vma->active);
> -}
> -
> /* do not reserve memory to prevent deadlocks */
> #define __EXEC_OBJECT_NO_RESERVE BIT(31)
>
> @@ -138,6 +132,48 @@ static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
> return i915_vm_to_ggtt(vma->vm)->pin_bias;
> }
>
> +static inline bool i915_vma_is_persistent(const struct i915_vma *vma)
> +{
> + return test_bit(I915_VMA_PERSISTENT_BIT, __i915_vma_flags(vma));
> +}
> +
> +static inline void i915_vma_set_persistent(struct i915_vma *vma)
> +{
> + set_bit(I915_VMA_PERSISTENT_BIT, __i915_vma_flags(vma));
> +}
> +
> +static inline bool i915_vma_is_purged(const struct i915_vma *vma)
> +{
> + return test_bit(I915_VMA_PURGED_BIT, __i915_vma_flags(vma));
> +}
> +
> +static inline void i915_vma_set_purged(struct i915_vma *vma)
> +{
> + set_bit(I915_VMA_PURGED_BIT, __i915_vma_flags(vma));
> +}
> +
> +static inline bool i915_vma_is_freed(const struct i915_vma *vma)
> +{
> + return test_bit(I915_VMA_FREED_BIT, __i915_vma_flags(vma));
> +}
> +
> +static inline void i915_vma_set_freed(struct i915_vma *vma)
> +{
> + set_bit(I915_VMA_FREED_BIT, __i915_vma_flags(vma));
> +}
> +
> +static inline bool i915_vma_is_active(const struct i915_vma *vma)
> +{
> + if (i915_vma_is_persistent(vma)) {
> + if (i915_vma_is_purged(vma))
> + return false;
> +
> + return i915_vm_is_active(vma->vm);
> + }
> +
> + return !i915_active_is_idle(&vma->active);
> +}
> +
> static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
> {
> i915_gem_object_get(vma->obj);
> @@ -406,12 +442,8 @@ void i915_vma_make_shrinkable(struct i915_vma *vma);
> void i915_vma_make_purgeable(struct i915_vma *vma);
>
> int i915_vma_wait_for_bind(struct i915_vma *vma);
> -
> -static inline int i915_vma_sync(struct i915_vma *vma)
> -{
> - /* Wait for the asynchronous bindings and pending GPU reads */
> - return i915_active_wait(&vma->active);
> -}
> +int i915_vma_verify_bind_complete(struct i915_vma *vma);
> +int i915_vma_sync(struct i915_vma *vma);
>
> /**
> * i915_vma_get_current_resource - Get the current resource of the vma
> diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
> index de5534d518cdd..5483ccf0c82c7 100644
> --- a/drivers/gpu/drm/i915/i915_vma_types.h
> +++ b/drivers/gpu/drm/i915/i915_vma_types.h
> @@ -264,6 +264,28 @@ struct i915_vma {
> #define I915_VMA_SCANOUT_BIT 17
> #define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
>
> + /**
> + * I915_VMA_PERSISTENT_BIT:
> + * The vma is persistent (created with a VM_BIND call).
> + *
> + * I915_VMA_PURGED_BIT:
> + * The persistent vma is force unbound, either due to a VM_UNBIND call
> + * from the UMD or because the VM is released. Do not check/wait for VM
> + * activeness in i915_vma_is_active() and i915_vma_sync() calls.
> + *
> + * I915_VMA_FREED_BIT:
> + * The persistent vma is being released by the UMD via a VM_UNBIND call.
> + * While releasing the vma, do not take the VM_BIND lock, as the
> + * VM_UNBIND call already holds it.
> + */
> +#define I915_VMA_PERSISTENT_BIT 19
> +#define I915_VMA_PURGED_BIT 20
> +#define I915_VMA_FREED_BIT 21
> +
> +#define I915_VMA_PERSISTENT ((int)BIT(I915_VMA_PERSISTENT_BIT))
> +#define I915_VMA_PURGED ((int)BIT(I915_VMA_PURGED_BIT))
> +#define I915_VMA_FREED ((int)BIT(I915_VMA_FREED_BIT))
> +
> struct i915_active active;
>
> #define I915_VMA_PAGES_BIAS 24
> @@ -293,6 +315,8 @@ struct i915_vma {
> struct list_head vm_bind_link;
> /* @non_priv_vm_bind_link: Link in non-private persistent VMA list */
> struct list_head non_priv_vm_bind_link;
> + /* @vm_rebind_link: link in vm_rebind_list, protected by vm_rebind_lock */
> + struct list_head vm_rebind_link;
>
> /** Interval tree structures for persistent vma */
--
Jani Nikula, Intel Open Source Graphics Center