[Intel-gfx] [PATCH 02/24] drm/i915: Pin backing pages whilst exporting through a dmabuf vmap
Ben Widawsky
ben at bwidawsk.net
Fri Sep 7 00:55:41 CEST 2012
On Tue, 4 Sep 2012 21:02:54 +0100
Chris Wilson <chris at chris-wilson.co.uk> wrote:
> We need to refcount our pages in order to prevent reaping them at
> inopportune times, such as when they are currently vmapped or exported
> to another driver. However, we also wish to keep the lazy deallocation
> of our pages, so we need to take a pin/unpin approach rather than a
> simple refcount.
I've not followed the dmabuf development much, but is there no interface
to map partial objects, i.e. to have only some pages of an object pinned
rather than all of them?
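
For my own understanding, the lifecycle the new pin count enforces looks
roughly like this; this is only my sketch of the intended caller pattern
as I read the patch (example_export_hold is a made-up name, nothing in
the series), with struct_mutex assumed held throughout:

static int example_export_hold(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Populate obj->pages if they are not already resident. */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	/* Bump pages_pin_count so i915_gem_object_put_pages() and the
	 * shrinker refuse to reap the backing store while it is shared. */
	i915_gem_object_pin_pages(obj);

	/* ... hand obj->pages out: build an sg list, vmap them, etc. ... */

	/* Once the importer is done, drop the pin; the pages stay
	 * allocated but become lazily reapable again. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}

The dmabuf vmap/vunmap hunks below pair the pin with vmapping_count
going 1 -> 0 in exactly this way.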
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
With the comment below addressed or not, it's:
Reviewed-by: Ben Widawsky <ben at bwidawsk.net>
> ---
> drivers/gpu/drm/i915/i915_drv.h | 12 ++++++++++++
> drivers/gpu/drm/i915/i915_gem.c | 11 +++++++++--
> drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8 ++++++--
> 3 files changed, 27 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f180874..0747472 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -994,6 +994,7 @@ struct drm_i915_gem_object {
> unsigned int has_global_gtt_mapping:1;
>
> struct page **pages;
> + int pages_pin_count;
>
> /**
> * DMAR support
> @@ -1327,6 +1328,17 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
> void i915_gem_lastclose(struct drm_device *dev);
>
> int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
> +static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
> +{
> + BUG_ON(obj->pages == NULL);
> + obj->pages_pin_count++;
> +}
> +static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
> +{
> + BUG_ON(obj->pages_pin_count == 0);
> + obj->pages_pin_count--;
> +}
> +
I'd be a big fan of a BUG_ON(!mutex_is_locked()) check on struct_mutex in these helpers.
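Roughly what I have in mind (untested sketch; I'm assuming struct_mutex
is reachable via obj->base.dev here, spell it however fits):

static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	/* pages_pin_count is guarded by struct_mutex, so assert that
	 * the lock is actually held when we modify it. */
	BUG_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}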
> int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
> int i915_gem_object_sync(struct drm_i915_gem_object *obj,
> struct intel_ring_buffer *to);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 66fbd9f..aa088ef 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1699,6 +1699,9 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>
> BUG_ON(obj->gtt_space);
>
> + if (obj->pages_pin_count)
> + return -EBUSY;
> +
> ops->put_pages(obj);
>
> list_del(&obj->gtt_list);
> @@ -1830,6 +1833,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
> if (obj->sg_table || obj->pages)
> return 0;
>
> + BUG_ON(obj->pages_pin_count);
> +
> ret = ops->get_pages(obj);
> if (ret)
> return ret;
> @@ -3736,6 +3741,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
> dev_priv->mm.interruptible = was_interruptible;
> }
>
> + obj->pages_pin_count = 0;
> i915_gem_object_put_pages(obj);
> i915_gem_object_free_mmap_offset(obj);
>
> @@ -4395,9 +4401,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
>
> cnt = 0;
> list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
> - cnt += obj->base.size >> PAGE_SHIFT;
> + if (obj->pages_pin_count == 0)
> + cnt += obj->base.size >> PAGE_SHIFT;
> list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
> - if (obj->pin_count == 0)
> + if (obj->pin_count == 0 && obj->pages_pin_count == 0)
> cnt += obj->base.size >> PAGE_SHIFT;
>
> mutex_unlock(&dev->struct_mutex);
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index e4f1141..eca4726 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -50,6 +50,8 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
> /* link the pages into an SG then map the sg */
> sg = drm_prime_pages_to_sg(obj->pages, npages);
> nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
> + i915_gem_object_pin_pages(obj);
> +
> out:
> mutex_unlock(&dev->struct_mutex);
> return sg;
> @@ -102,6 +104,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
> }
>
> obj->vmapping_count = 1;
> + i915_gem_object_pin_pages(obj);
> out_unlock:
> mutex_unlock(&dev->struct_mutex);
> return obj->dma_buf_vmapping;
> @@ -117,10 +120,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
> if (ret)
> return;
>
> - --obj->vmapping_count;
> - if (obj->vmapping_count == 0) {
> + if (--obj->vmapping_count == 0) {
> vunmap(obj->dma_buf_vmapping);
> obj->dma_buf_vmapping = NULL;
> +
> + i915_gem_object_unpin_pages(obj);
> }
> mutex_unlock(&dev->struct_mutex);
> }
--
Ben Widawsky, Intel Open Source Technology Center