[PATCH v2] drm/gem: Fix GEM handle release errors
Christian König
christian.koenig at amd.com
Sun Aug 7 16:52:52 UTC 2022
On 03.08.22 10:32, Jeffy Chen wrote:
> Currently we are assuming a one-to-one mapping between dmabuf and
> handle when releasing GEM handles.
>
> But that is not always true, since we would create extra handles for the
> GEM obj in cases like gem_open() and getfb{,2}().
>
> A similar issue was reported at:
> https://lore.kernel.org/all/20211105083308.392156-1-jay.xu@rock-chips.com/
>
> Another problem is that drm_gem_remove_prime_handles() now only
> removes the handle to the exported dmabuf (gem_obj->dma_buf), so the
> imported ones would leak:
> WARNING: CPU: 2 PID: 236 at drivers/gpu/drm/drm_prime.c:228 drm_prime_destroy_file_private+0x18/0x24
>
> Let's fix these by using the handle to find the exact mapping to remove.
Well, we are clearly missing something here. As far as I can see, the
current code is correct.
Creating multiple GEM handles for the same DMA-buf is possible, but illegal.
In other words, when a GEM handle is exported as a DMA-buf and imported
again, you should intentionally always get the same handle back.
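
As an illustration of that expectation, here is a minimal userspace
sketch using the libdrm PRIME wrappers (check_reimport, fd and handle
are made-up names for the example, not taken from this patch):

#include <assert.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Export a GEM handle as a DMA-buf fd, then import it again on the
 * same device fd.  The DRM core tracks the dma_buf in
 * drm_prime_file_private, so the re-import should hand back the
 * original handle instead of creating a second one. */
static void check_reimport(int fd, uint32_t handle)
{
	int prime_fd;
	uint32_t reimported;

	if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd))
		return;

	if (drmPrimeFDToHandle(fd, prime_fd, &reimported) == 0)
		assert(reimported == handle);

	close(prime_fd);
}

If that assert ever fires, the bug is in whoever created the second
handle, not in the release path.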
So this is pretty much a clear NAK to this patch, since it either
shouldn't be necessary or something is seriously broken somewhere else.
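
For context, the WARN_ON Jeffy quotes fires in
drm_prime_destroy_file_private(), which expects drm_gem_release() to
have emptied the rbtree before the file goes away (quoted from
drm_prime.c from memory, so treat it as a sketch):

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

A leftover entry at that point means a handle was dropped without its
PRIME mapping being removed, which is the symptom to chase rather than
something to paper over in the release path.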
Regards,
Christian.
>
> Signed-off-by: Jeffy Chen <jeffy.chen at rock-chips.com>
> ---
>
> Changes in v2:
> Fix a typo in the rbtree handling.
>
> drivers/gpu/drm/drm_gem.c | 17 +----------------
> drivers/gpu/drm/drm_internal.h | 4 ++--
> drivers/gpu/drm/drm_prime.c | 20 ++++++++++++--------
> 3 files changed, 15 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index eb0c2d041f13..ed39da383570 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
> }
> EXPORT_SYMBOL(drm_gem_private_object_init);
>
> -static void
> -drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
> -{
> - /*
> - * Note: obj->dma_buf can't disappear as long as we still hold a
> - * handle reference in obj->handle_count.
> - */
> - mutex_lock(&filp->prime.lock);
> - if (obj->dma_buf) {
> - drm_prime_remove_buf_handle_locked(&filp->prime,
> - obj->dma_buf);
> - }
> - mutex_unlock(&filp->prime.lock);
> -}
> -
> /**
> * drm_gem_object_handle_free - release resources bound to userspace handles
> * @obj: GEM object to clean up.
> @@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
> if (obj->funcs->close)
> obj->funcs->close(obj, file_priv);
>
> - drm_gem_remove_prime_handles(obj, file_priv);
> + drm_prime_remove_buf_handle(&file_priv->prime, id);
> drm_vma_node_revoke(&obj->vma_node, file_priv);
>
> drm_gem_object_handle_put_unlocked(obj);
> diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
> index 1fbbc19f1ac0..7bb98e6a446d 100644
> --- a/drivers/gpu/drm/drm_internal.h
> +++ b/drivers/gpu/drm/drm_internal.h
> @@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
>
> void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
> void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
> -void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
> - struct dma_buf *dma_buf);
> +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
> + uint32_t handle);
>
> /* drm_drv.c */
> struct drm_minor *drm_minor_acquire(unsigned int minor_id);
> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
> index e3f09f18110c..bd5366b16381 100644
> --- a/drivers/gpu/drm/drm_prime.c
> +++ b/drivers/gpu/drm/drm_prime.c
> @@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
> return -ENOENT;
> }
>
> -void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
> - struct dma_buf *dma_buf)
> +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
> + uint32_t handle)
> {
> struct rb_node *rb;
>
> - rb = prime_fpriv->dmabufs.rb_node;
> + mutex_lock(&prime_fpriv->lock);
> +
> + rb = prime_fpriv->handles.rb_node;
> while (rb) {
> struct drm_prime_member *member;
>
> - member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
> - if (member->dma_buf == dma_buf) {
> + member = rb_entry(rb, struct drm_prime_member, handle_rb);
> + if (member->handle == handle) {
> rb_erase(&member->handle_rb, &prime_fpriv->handles);
> rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
>
> - dma_buf_put(dma_buf);
> + dma_buf_put(member->dma_buf);
> kfree(member);
> - return;
> - } else if (member->dma_buf < dma_buf) {
> + break;
> + } else if (member->handle < handle) {
> rb = rb->rb_right;
> } else {
> rb = rb->rb_left;
> }
> }
> +
> + mutex_unlock(&prime_fpriv->lock);
> }
>
> void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)