[PATCH -next 10/18] drm/ttm: Make the object handles idr-generated
Christian König
christian.koenig at amd.com
Wed Sep 26 16:36:53 UTC 2018
Am 26.09.2018 um 18:18 schrieb Thomas Hellstrom:
> Instead of generating user-space object handles based on a (possibly
> processed) hash of the object's kernel address, use idr to generate
> and look up those handles. This might improve security somewhat, since
> we lose all connection to the object's kernel address. Also, idr is
> designed for exactly this purpose.
>
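> A minimal sketch of that allocation pattern outside TTM (the idr and
> lock names below are illustrative, not part of this patch):
>
> #include <linux/idr.h>
> #include <linux/spinlock.h>
>
> static DEFINE_IDR(handle_idr);
> static DEFINE_SPINLOCK(handle_lock);
>
> /* Return the lowest free handle for @obj, or -errno on failure. */
> static int handle_alloc(void *obj)
> {
>         int ret;
>
>         idr_preload(GFP_KERNEL);   /* preallocate outside the spinlock */
>         spin_lock(&handle_lock);
>         ret = idr_alloc(&handle_idr, obj, 0, 0, GFP_NOWAIT);
>         spin_unlock(&handle_lock);
>         idr_preload_end();
>
>         return ret;
> }
>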
> As a todo item, since user-space handles are now generated in sequence,
> we can probably use a much simpler hash function to hash them.
>
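> One possibility for that todo, sketched here as a hypothetical helper
> (not part of this patch): with handles counting up from zero, a plain
> power-of-two mask already distributes keys evenly over a hash table.
>
> /* Hypothetical: sequential handles need no mixing before masking. */
> static inline u32 ttm_ref_hash(u32 handle, unsigned int order)
> {
>         return handle & (BIT(order) - 1);
> }
>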
> Cc: Christian König <christian.koenig at amd.com>
> Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
> Reviewed-by: Sinclair Yeh <syeh at vmware.com>
> Reviewed-by: Deepak Rawat <drawat at vmware.com>
Well, NAK. I already proposed removing ttm_lock.c and ttm_object.c
entirely, because vmwgfx is the only user of them.
Please move that functionality into the driver, since it isn't
TTM-specific after all.
Christian.
> ---
> drivers/gpu/drm/ttm/ttm_lock.c | 2 +-
> drivers/gpu/drm/ttm/ttm_object.c | 42 ++++++++++++-------------
> drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 7 +++--
> drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 12 +++----
> drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 7 +++--
> drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 5 +++
> drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 16 +++-------
> drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c | 5 +--
> drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 18 +++++------
> include/drm/ttm/ttm_object.h | 13 ++++++--
> 10 files changed, 65 insertions(+), 62 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
> index 20694b8a01ca..214cf73241d9 100644
> --- a/drivers/gpu/drm/ttm/ttm_lock.c
> +++ b/drivers/gpu/drm/ttm/ttm_lock.c
> @@ -267,7 +267,7 @@ EXPORT_SYMBOL(ttm_vt_lock);
> int ttm_vt_unlock(struct ttm_lock *lock)
> {
> return ttm_ref_object_base_unref(lock->vt_holder,
> - lock->base.hash.key, TTM_REF_USAGE);
> + lock->base.handle, TTM_REF_USAGE);
> }
> EXPORT_SYMBOL(ttm_vt_unlock);
>
> diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
> index 74f1b1eb1f8e..0782c6280d9b 100644
> --- a/drivers/gpu/drm/ttm/ttm_object.c
> +++ b/drivers/gpu/drm/ttm/ttm_object.c
> @@ -95,6 +95,7 @@ struct ttm_object_device {
> struct dma_buf_ops ops;
> void (*dmabuf_release)(struct dma_buf *dma_buf);
> size_t dma_buf_size;
> + struct idr idr;
> };
>
> /**
> @@ -172,14 +173,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
> base->ref_obj_release = ref_obj_release;
> base->object_type = object_type;
> kref_init(&base->refcount);
> + idr_preload(GFP_KERNEL);
> spin_lock(&tdev->object_lock);
> - ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
> - &base->hash,
> - (unsigned long)base, 31, 0, 0);
> + ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
> spin_unlock(&tdev->object_lock);
> - if (unlikely(ret != 0))
> - goto out_err0;
> + idr_preload_end();
> + if (ret < 0)
> + return ret;
>
> + base->handle = ret;
> ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
> if (unlikely(ret != 0))
> goto out_err1;
> @@ -189,9 +191,8 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
> return 0;
> out_err1:
> spin_lock(&tdev->object_lock);
> - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
> + idr_remove(&tdev->idr, base->handle);
> spin_unlock(&tdev->object_lock);
> -out_err0:
> return ret;
> }
> EXPORT_SYMBOL(ttm_base_object_init);
> @@ -203,7 +204,7 @@ static void ttm_release_base(struct kref *kref)
> struct ttm_object_device *tdev = base->tfile->tdev;
>
> spin_lock(&tdev->object_lock);
> - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
> + idr_remove(&tdev->idr, base->handle);
> spin_unlock(&tdev->object_lock);
>
> /*
> @@ -252,19 +253,13 @@ EXPORT_SYMBOL(ttm_base_object_lookup);
> struct ttm_base_object *
> ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
> {
> - struct ttm_base_object *base = NULL;
> - struct drm_hash_item *hash;
> - struct drm_open_hash *ht = &tdev->object_hash;
> - int ret;
> + struct ttm_base_object *base;
>
> rcu_read_lock();
> - ret = drm_ht_find_item_rcu(ht, key, &hash);
> + base = idr_find(&tdev->idr, key);
>
> - if (likely(ret == 0)) {
> - base = drm_hash_entry(hash, struct ttm_base_object, hash);
> - if (!kref_get_unless_zero(&base->refcount))
> - base = NULL;
> - }
> + if (base && !kref_get_unless_zero(&base->refcount))
> + base = NULL;
> rcu_read_unlock();
>
> return base;
> @@ -289,7 +284,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
> struct ttm_ref_object *ref;
>
> rcu_read_lock();
> - if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
> + if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
> goto out_false;
>
> /*
> @@ -340,7 +335,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
>
> while (ret == -EINVAL) {
> rcu_read_lock();
> - ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
> + ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
>
> if (ret == 0) {
> ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
> @@ -364,7 +359,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
> return -ENOMEM;
> }
>
> - ref->hash.key = base->hash.key;
> + ref->hash.key = base->handle;
> ref->obj = base;
> ref->tfile = tfile;
> ref->ref_type = ref_type;
> @@ -519,6 +514,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
> if (ret != 0)
> goto out_no_object_hash;
>
> + idr_init(&tdev->idr);
> tdev->ops = *ops;
> tdev->dmabuf_release = tdev->ops.release;
> tdev->ops.release = ttm_prime_dmabuf_release;
> @@ -538,6 +534,8 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
>
> *p_tdev = NULL;
>
> + WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
> + idr_destroy(&tdev->idr);
> drm_ht_remove(&tdev->object_hash);
>
> kfree(tdev);
> @@ -641,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
>
> prime = (struct ttm_prime_object *) dma_buf->priv;
> base = &prime->base;
> - *handle = base->hash.key;
> + *handle = base->handle;
> ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
>
> dma_buf_put(dma_buf);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index 2dda03345761..d80801d41f69 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
> struct_size = backend_size +
> ttm_round_pot(sizeof(struct vmw_buffer_object));
> user_struct_size = backend_size +
> - ttm_round_pot(sizeof(struct vmw_user_buffer_object));
> + ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
> + TTM_OBJ_EXTRA_SIZE;
> }
>
> if (dev_priv->map_mode == vmw_dma_alloc_coherent)
> @@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
> *p_base = &user_bo->prime.base;
> kref_get(&(*p_base)->refcount);
> }
> - *handle = user_bo->prime.base.hash.key;
> + *handle = user_bo->prime.base.handle;
>
> out_no_base_object:
> return ret;
> @@ -940,7 +941,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
>
> user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
>
> - *handle = user_bo->prime.base.hash.key;
> + *handle = user_bo->prime.base.handle;
> return ttm_ref_object_add(tfile, &user_bo->prime.base,
> TTM_REF_USAGE, NULL, false);
> }
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
> index 4d502567d24c..24d7c81081ae 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
> @@ -755,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
> return -EINVAL;
> }
>
> - /*
> - * Approximate idr memory usage with 128 bytes. It will be limited
> - * by maximum number_of contexts anyway.
> - */
> -
> if (unlikely(vmw_user_context_size == 0))
> - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
> - ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
> + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
> + ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
> + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
>
> ret = ttm_read_lock(&dev_priv->reservation_sem, true);
> if (unlikely(ret != 0))
> @@ -807,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
> goto out_err;
> }
>
> - arg->cid = ctx->base.hash.key;
> + arg->cid = ctx->base.handle;
> out_err:
> vmw_resource_unreference(&res);
> out_unlock:
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
> index 3d546d409334..f87261545f2c 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
> @@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
> INIT_LIST_HEAD(&fman->cleanup_list);
> INIT_WORK(&fman->work, &vmw_fence_work_func);
> fman->fifo_down = true;
> - fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
> + fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
> + TTM_OBJ_EXTRA_SIZE;
> fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
> fman->event_fence_action_size =
> ttm_round_pot(sizeof(struct vmw_event_fence_action));
> @@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
> }
>
> *p_fence = &ufence->fence;
> - *p_handle = ufence->base.hash.key;
> + *p_handle = ufence->base.handle;
>
> return 0;
> out_err:
> @@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
> "object.\n");
> goto out_no_ref_obj;
> }
> - handle = base->hash.key;
> + handle = base->handle;
> }
> ttm_base_object_unref(&base);
> }
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
> index 645370868296..7e19eba0b0b8 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
> @@ -30,6 +30,11 @@
>
> #include "vmwgfx_drv.h"
>
> +/*
> + * Extra memory required by the resource id's ida storage, which is allocated
> + * separately from the base object itself. We estimate an average of 128
> + * bytes per ida.
> + */
> #define VMW_IDA_ACC_SIZE 128
>
> enum vmw_cmdbuf_res_state {
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
> index c72b4351176a..6915c8258e6b 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
> @@ -740,13 +740,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
> };
> int ret;
>
> - /*
> - * Approximate idr memory usage with 128 bytes. It will be limited
> - * by maximum number_of shaders anyway.
> - */
> if (unlikely(vmw_user_shader_size == 0))
> vmw_user_shader_size =
> - ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
> + ttm_round_pot(sizeof(struct vmw_user_shader)) +
> + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
>
> ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
> vmw_user_shader_size,
> @@ -792,7 +789,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
> }
>
> if (handle)
> - *handle = ushader->base.hash.key;
> + *handle = ushader->base.handle;
> out_err:
> vmw_resource_unreference(&res);
> out:
> @@ -814,13 +811,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
> };
> int ret;
>
> - /*
> - * Approximate idr memory usage with 128 bytes. It will be limited
> - * by maximum number_of shaders anyway.
> - */
> if (unlikely(vmw_shader_size == 0))
> vmw_shader_size =
> - ttm_round_pot(sizeof(struct vmw_shader)) + 128;
> + ttm_round_pot(sizeof(struct vmw_shader)) +
> + VMW_IDA_ACC_SIZE;
>
> ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
> vmw_shader_size,
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
> index 3bd60f7a9d6d..6a6865384e91 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
> @@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
>
> alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
> func->size;
> - account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
> + account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
> + TTM_OBJ_EXTRA_SIZE;
>
> ret = ttm_read_lock(&dev_priv->reservation_sem, true);
> if (ret)
> @@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
> goto out_err;
> }
>
> - func->set_arg_handle(data, usimple->base.hash.key);
> + func->set_arg_handle(data, usimple->base.handle);
> out_err:
> vmw_resource_unreference(&res);
> out_ret:
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
> index bd4cf995089c..a67c8f9af129 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
> @@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
>
> if (unlikely(vmw_user_surface_size == 0))
> vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
> - 128;
> + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
>
> num_sizes = 0;
> for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
> @@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
> num_sizes == 0)
> return -EINVAL;
>
> - size = vmw_user_surface_size + 128 +
> + size = vmw_user_surface_size +
> ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
> ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
>
> @@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
> goto out_unlock;
> }
>
> - rep->sid = user_srf->prime.base.hash.key;
> + rep->sid = user_srf->prime.base.handle;
> vmw_resource_unreference(&res);
>
> ttm_read_unlock(&dev_priv->reservation_sem);
> @@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
> if (unlikely(ret != 0)) {
> DRM_ERROR("copy_to_user failed %p %u\n",
> user_sizes, srf->num_sizes);
> - ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
> + ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
> ret = -EFAULT;
> }
>
> @@ -1609,9 +1609,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
>
> if (unlikely(vmw_user_surface_size == 0))
> vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
> - 128;
> + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
>
> - size = vmw_user_surface_size + 128;
> + size = vmw_user_surface_size;
>
> /* Define a surface based on the parameters. */
> ret = vmw_surface_gb_priv_define(dev,
> @@ -1683,7 +1683,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
> goto out_unlock;
> }
>
> - rep->handle = user_srf->prime.base.hash.key;
> + rep->handle = user_srf->prime.base.handle;
> rep->backup_size = res->backup_size;
> if (res->backup) {
> rep->buffer_map_handle =
> @@ -1745,7 +1745,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
> if (unlikely(ret != 0)) {
> DRM_ERROR("Could not add a reference to a GB surface "
> "backup buffer.\n");
> - (void) ttm_ref_object_base_unref(tfile, base->hash.key,
> + (void) ttm_ref_object_base_unref(tfile, base->handle,
> TTM_REF_USAGE);
> goto out_bad_resource;
> }
> @@ -1759,7 +1759,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
> rep->creq.base.array_size = srf->array_size;
> rep->creq.base.buffer_handle = backup_handle;
> rep->creq.base.base_size = srf->base_size;
> - rep->crep.handle = user_srf->prime.base.hash.key;
> + rep->crep.handle = user_srf->prime.base.handle;
> rep->crep.backup_size = srf->res.backup_size;
> rep->crep.buffer_handle = backup_handle;
> rep->crep.buffer_map_handle =
> diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
> index a98bfeb4239e..6204acc2ebf4 100644
> --- a/include/drm/ttm/ttm_object.h
> +++ b/include/drm/ttm/ttm_object.h
> @@ -125,14 +125,14 @@ struct ttm_object_device;
>
> struct ttm_base_object {
> struct rcu_head rhead;
> - struct drm_hash_item hash;
> - enum ttm_object_type object_type;
> - bool shareable;
> struct ttm_object_file *tfile;
> struct kref refcount;
> void (*refcount_release) (struct ttm_base_object **base);
> void (*ref_obj_release) (struct ttm_base_object *base,
> enum ttm_ref_type ref_type);
> + u32 handle;
> + enum ttm_object_type object_type;
> + u32 shareable;
> };
>
>
> @@ -351,4 +351,11 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
>
> #define ttm_prime_object_kfree(__obj, __prime) \
> kfree_rcu(__obj, __prime.base.rhead)
> +
> +/*
> + * Extra memory required by the base object's idr storage, which is allocated
> + * separately from the base object itself. We estimate an average of 128
> + * bytes per idr.
> + */
> +#define TTM_OBJ_EXTRA_SIZE 128
> #endif
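
The lookup side of this scheme (ttm_base_object_lookup_for_ref() above)
boils down to idr_find() under RCU plus kref_get_unless_zero(). A minimal
standalone sketch (function name assumed), relying on the object being
freed via kfree_rcu() so the pointer stays valid inside the read-side
critical section:

/* Look up @handle and take a reference, failing if the object is
 * already on its way out.
 */
static struct ttm_base_object *lookup_get(struct idr *idr, u32 handle)
{
        struct ttm_base_object *base;

        rcu_read_lock();
        base = idr_find(idr, handle);
        if (base && !kref_get_unless_zero(&base->refcount))
                base = NULL;    /* dying object; treat as not found */
        rcu_read_unlock();

        return base;
}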