[Intel-gfx] [PATCH 06/16] drm/i915/vm_bind: Support for VM private BOs
Matthew Auld
matthew.auld at intel.com
Wed Sep 28 17:54:27 UTC 2022
On 28/09/2022 07:19, Niranjana Vishwanathapura wrote:
> Each VM creates a root_obj and shares it with all of its private objects
> to use it as dma_resv object. This has a performance advantage as it
> requires a single dma_resv object update for all private BOs vs list of
> dma_resv objects update for shared BOs, in the execbuf path.
>
> VM private BOs can be only mapped on specified VM and cannot be dmabuf
> exported. Also, they are supported only in vm_bind mode.
>
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
> Signed-off-by: Andi Shyti <andi.shyti at linux.intel.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_create.c | 41 ++++++++++++++++++-
> drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 6 +++
> .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 4 ++
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 3 ++
> .../gpu/drm/i915/gem/i915_gem_object_types.h | 3 ++
> drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 3 ++
> .../drm/i915/gem/i915_gem_vm_bind_object.c | 9 ++++
> drivers/gpu/drm/i915/gt/intel_gtt.c | 4 ++
> drivers/gpu/drm/i915/gt/intel_gtt.h | 2 +
> drivers/gpu/drm/i915/i915_vma.c | 1 +
> drivers/gpu/drm/i915/i915_vma_types.h | 2 +
> include/uapi/drm/i915_drm.h | 30 ++++++++++++++
> 12 files changed, 106 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
> index 4aa7b5582b8e..692d95ef5d3e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
> @@ -11,6 +11,7 @@
> #include "pxp/intel_pxp.h"
>
> #include "i915_drv.h"
> +#include "i915_gem_context.h"
> #include "i915_gem_create.h"
> #include "i915_trace.h"
> #include "i915_user_extensions.h"
> @@ -252,6 +253,7 @@ struct create_ext {
> unsigned int n_placements;
> unsigned int placement_mask;
> unsigned long flags;
> + u32 vm_id;
> };
>
> static void repr_placements(char *buf, size_t size,
> @@ -401,9 +403,24 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
> return 0;
> }
>
> +static int ext_set_vm_private(struct i915_user_extension __user *base,
> + void *data)
> +{
> + struct drm_i915_gem_create_ext_vm_private ext;
> + struct create_ext *ext_data = data;
> +
> + if (copy_from_user(&ext, base, sizeof(ext)))
> + return -EFAULT;
> +
> + ext_data->vm_id = ext.vm_id;
> +
> + return 0;
> +}
> +
> static const i915_user_extension_fn create_extensions[] = {
> [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
> [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
> + [I915_GEM_CREATE_EXT_VM_PRIVATE] = ext_set_vm_private,
> };
>
> /**
> @@ -419,6 +436,7 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
> struct drm_i915_private *i915 = to_i915(dev);
> struct drm_i915_gem_create_ext *args = data;
> struct create_ext ext_data = { .i915 = i915 };
> + struct i915_address_space *vm = NULL;
> struct drm_i915_gem_object *obj;
> int ret;
>
> @@ -432,6 +450,12 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
> if (ret)
> return ret;
>
> + if (ext_data.vm_id) {
> + vm = i915_gem_vm_lookup(file->driver_priv, ext_data.vm_id);
> + if (unlikely(!vm))
> + return -ENOENT;
> + }
> +
> if (!ext_data.n_placements) {
> ext_data.placements[0] =
> intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
> @@ -458,8 +482,21 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
> ext_data.placements,
> ext_data.n_placements,
> ext_data.flags);
> - if (IS_ERR(obj))
> - return PTR_ERR(obj);
> + if (IS_ERR(obj)) {
> + ret = PTR_ERR(obj);
> + goto vm_put;
> + }
> +
> + if (vm) {
> + obj->base.resv = vm->root_obj->base.resv;
> + obj->priv_root = i915_gem_object_get(vm->root_obj);
> + i915_vm_put(vm);
> + }
>
> return i915_gem_publish(obj, file, &args->size, &args->handle);
> +vm_put:
> + if (vm)
> + i915_vm_put(vm);
> +
> + return ret;
> }
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> index f5062d0c6333..6433173c3e84 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> @@ -218,6 +218,12 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
> struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>
> + if (obj->priv_root) {
> + drm_dbg(obj->base.dev,
> + "Exporting VM private objects is not allowed\n");
> + return ERR_PTR(-EINVAL);
> + }
> +
> exp_info.ops = &i915_dmabuf_ops;
> exp_info.size = gem_obj->size;
> exp_info.flags = flags;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index f85f10cf9c34..33d989a20227 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -864,6 +864,10 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
> if (unlikely(!obj))
> return ERR_PTR(-ENOENT);
>
> + /* VM private objects are not supported here */
> + if (obj->priv_root)
> + return ERR_PTR(-EINVAL);
> +
> /*
> * If the user has opted-in for protected-object tracking, make
> * sure the object encryption can be used.
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 7ff9c7877bec..271ad62b3245 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -108,6 +108,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
> */
> void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
> {
> + if (obj->priv_root && !obj->ttm.created)
> + i915_gem_object_put(obj->priv_root);
> +
> mutex_destroy(&obj->mm.get_page.lock);
> mutex_destroy(&obj->mm.get_dma_page.lock);
> dma_resv_fini(&obj->base._resv);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 40305e2bcd49..2e79cfc0b06a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -241,6 +241,9 @@ struct drm_i915_gem_object {
>
> const struct drm_i915_gem_object_ops *ops;
>
> + /* For VM private BO, points to root_obj in VM. NULL otherwise */
> + struct drm_i915_gem_object *priv_root;
> +
> struct {
> /**
> * @vma.lock: protect the list/tree of vmas
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index e3fc38dd5db0..b4d8cb33df8e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -1153,6 +1153,9 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
> mutex_destroy(&obj->ttm.get_io_page.lock);
>
> if (obj->ttm.created) {
> + if (obj->priv_root)
> + i915_gem_object_put(obj->priv_root);
> +
> /*
> * We freely manage the shrinker LRU outide of the mm.pages life
> * cycle. As a result when destroying the object we should be
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
> index e529162abd2c..809c78455d2e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
> @@ -86,6 +86,7 @@ static void i915_gem_vm_bind_remove(struct i915_vma *vma, bool release_obj)
> lockdep_assert_held(&vma->vm->vm_bind_lock);
>
> list_del_init(&vma->vm_bind_link);
> + list_del_init(&vma->non_priv_vm_bind_link);
> i915_vm_bind_it_remove(vma, &vma->vm->va);
>
> /* Release object */
> @@ -205,6 +206,11 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
> goto put_obj;
> }
>
> + if (obj->priv_root && obj->priv_root != vm->root_obj) {
> + ret = -EINVAL;
> + goto put_obj;
> + }
> +
> ret = mutex_lock_interruptible(&vm->vm_bind_lock);
> if (ret)
> goto put_obj;
> @@ -231,6 +237,9 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
>
> list_add_tail(&vma->vm_bind_link, &vm->vm_bound_list);
> i915_vm_bind_it_insert(vma, &vm->va);
> + if (!obj->priv_root)
> + list_add_tail(&vma->non_priv_vm_bind_link,
> + &vm->non_priv_vm_bind_list);
>
> /* Hold object reference until vm_unbind */
> i915_gem_object_get(vma->obj);
> diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
> index 0daa70c6ed0d..da4f9dee0397 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gtt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
> @@ -177,6 +177,7 @@ int i915_vm_lock_objects(struct i915_address_space *vm,
> void i915_address_space_fini(struct i915_address_space *vm)
> {
> drm_mm_takedown(&vm->mm);
> + i915_gem_object_put(vm->root_obj);
> GEM_BUG_ON(!RB_EMPTY_ROOT(&vm->va.rb_root));
> mutex_destroy(&vm->vm_bind_lock);
> }
> @@ -292,6 +293,9 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
> INIT_LIST_HEAD(&vm->vm_bind_list);
> INIT_LIST_HEAD(&vm->vm_bound_list);
> mutex_init(&vm->vm_bind_lock);
> + INIT_LIST_HEAD(&vm->non_priv_vm_bind_list);
> + vm->root_obj = i915_gem_object_create_internal(vm->i915, PAGE_SIZE);
> + GEM_BUG_ON(IS_ERR(vm->root_obj));
> }
>
> void *__px_vaddr(struct drm_i915_gem_object *p)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
> index b52061858161..3f2e87d3bf34 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gtt.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
> @@ -275,6 +275,8 @@ struct i915_address_space {
> struct list_head vm_bound_list;
> /* @va: tree of persistent vmas */
> struct rb_root_cached va;
> + struct list_head non_priv_vm_bind_list;
> + struct drm_i915_gem_object *root_obj;
>
> /* Global GTT */
> bool is_ggtt:1;
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 33f910473263..89c276163916 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -240,6 +240,7 @@ vma_create(struct drm_i915_gem_object *obj,
> mutex_unlock(&vm->mutex);
>
> INIT_LIST_HEAD(&vma->vm_bind_link);
> + INIT_LIST_HEAD(&vma->non_priv_vm_bind_link);
> return vma;
>
> err_unlock:
> diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
> index f56ac07c6cfa..3d5ffc3e666b 100644
> --- a/drivers/gpu/drm/i915/i915_vma_types.h
> +++ b/drivers/gpu/drm/i915/i915_vma_types.h
> @@ -298,6 +298,8 @@ struct i915_vma {
>
> /** @vm_bind_link: node for the vm_bind related lists of vm */
> struct list_head vm_bind_link;
> + /* @non_priv_vm_bind_link: Link in non-private persistent VMA list */
> + struct list_head non_priv_vm_bind_link;
>
> /** Interval tree structures for persistent vma */
>
> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
> index 9760564b4693..3eee3071ad60 100644
> --- a/include/uapi/drm/i915_drm.h
> +++ b/include/uapi/drm/i915_drm.h
> @@ -3601,9 +3601,13 @@ struct drm_i915_gem_create_ext {
> *
> * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> * struct drm_i915_gem_create_ext_protected_content.
> + *
> + * For I915_GEM_CREATE_EXT_VM_PRIVATE usage see
> + * struct drm_i915_gem_create_ext_vm_private.
> */
> #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> +#define I915_GEM_CREATE_EXT_VM_PRIVATE 2
> __u64 extensions;
> };
>
> @@ -3721,6 +3725,32 @@ struct drm_i915_gem_create_ext_protected_content {
> /* ID of the protected content session managed by i915 when PXP is active */
> #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
>
> +/**
> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
> + * private to the specified VM.
> + *
> + * See struct drm_i915_gem_create_ext.
> + *
> + * By default, BOs can be mapped on multiple VMs and can also be dma-buf
> + * exported. Hence these BOs are referred to as Shared BOs.
> + * During each execbuf3 submission, the request fence must be added to the
> + * dma-resv fence list of all shared BOs mapped on the VM.
> + *
> + * Unlike Shared BOs, these VM private BOs can only be mapped on the VM they
> + * are private to and can't be dma-buf exported. All private BOs of a VM share
> + * the dma-resv object. Hence during each execbuf3 submission, they need only
> + * one dma-resv fence list updated. Thus, the fast path (where required
> + * mappings are already bound) submission latency is O(1) w.r.t the number of
> + * VM private BOs.
> + */
> +struct drm_i915_gem_create_ext_vm_private {
> + /** @base: Extension link. See struct i915_user_extension. */
> + struct i915_user_extension base;
> +
> + /** @vm_id: Id of the VM to which Object is private */
> + __u32 vm_id;
Does this not leave a hole? At a glance I would have assumed we need an
explicit __u32 pad somewhere...
> +};
> +
> /**
> * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
> *
More information about the Intel-gfx
mailing list