[Intel-gfx] [PATCH v5 3/5] drm/gem: Add drm_gem_object_funcs
Daniel Vetter
daniel at ffwll.ch
Thu Nov 1 08:36:36 UTC 2018
On Thu, Nov 01, 2018 at 12:37:14AM +0100, Noralf Trønnes wrote:
>
> Den 17.10.2018 15.04, skrev Noralf Trønnes:
> > This adds an optional function table on GEM objects.
> > The main benefit is for drivers that support more than one type of
> > memory (shmem, vram, cma) for their buffers depending on the hardware they
> > run on. With the callbacks attached to the GEM object itself, it is
> > easier to have core helpers for the various buffer types. The driver
> > only has to make the decision about buffer type on GEM object creation
> > and all other callbacks can be handled by the chosen helper.
> >
> > drm_driver->gem_prime_res_obj has not been added since there's a todo to
> > put a reservation_object into drm_gem_object.
> >
> > v2: Drop drm_gem_object_funcs->prime_mmap in favour of
> > drm_gem_prime_mmap() (Daniel Vetter)
> >
> > v1:
> > - drm_gem_object_funcs.map -> .prime_map let it only do PRIME mmap like
> > the function it supersedes (Daniel Vetter)
> > - Flip around the if ladders and make obj->funcs the first choice
> > highlighting the fact that this is the new default way of doing it
> > (Daniel Vetter)
> >
> > Signed-off-by: Noralf Trønnes <noralf at tronnes.org>
> > Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
> > ---
>
> Now that the shmem helper has been postponed, I'm thinking about doing
> some prep work in tinydrm to loosen the coupling to the cma helper (the
> long-term goal is to get rid of tinydrm.ko altogether).
>
> A first step could be to add a drm_driver->gem_create_object function to
> the cma helper that sets the default GEM object functions:
>
> static const struct drm_gem_object_funcs drm_cma_gem_funcs = {
>         .free           = drm_gem_cma_free_object,
>         .print_info     = drm_gem_cma_print_info,
>         .get_sg_table   = drm_gem_cma_prime_get_sg_table,
>         .vmap           = drm_gem_cma_prime_vmap,
>         .vm_ops         = &drm_gem_cma_vm_ops,
> };
>
> struct drm_gem_object *
> drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
> {
>         struct drm_gem_cma_object *cma_obj;
>
>         cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
>         if (!cma_obj)
>                 return NULL;
>
>         cma_obj->base.funcs = &drm_cma_gem_funcs;
>
>         return &cma_obj->base;
> }
>
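> A driver would then just point drm_driver->gem_create_object at this helper
> and could drop its .gem_free_object_unlocked, .gem_prime_get_sg_table,
> .gem_prime_vmap and .gem_vm_ops hooks, since those now come from obj->funcs.
> Rough sketch (the tiny_driver name is made up and only the hooks relevant
> here are shown):
>
> static struct drm_driver tiny_driver = {
>         .driver_features        = DRIVER_GEM | DRIVER_MODESET |
>                                   DRIVER_PRIME | DRIVER_ATOMIC,
>         /* Buffer type is decided here, obj->funcs handles the rest */
>         .gem_create_object      = drm_cma_gem_create_object_default_funcs,
>         .dumb_create            = drm_gem_cma_dumb_create,
>         .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
>         .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
>         .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
>         .gem_prime_mmap         = drm_gem_prime_mmap,
> };
>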
> tinydrm drivers rely on the buffer always having a virtual address, so
> for imported buffers there's currently a custom .gem_prime_import_sg_table
> that vmaps the buffer. In order to use the default cma helper gem
> functions I plan to do the vmapping on framebuffer creation instead.
>
> I could create a pair of functions (fb_create/destroy) in the cma helper
> to achieve that:
>
> struct drm_framebuffer *
> drm_cma_fb_create_vmap_with_funcs(struct drm_device *dev, struct drm_file *file,
>                                   const struct drm_mode_fb_cmd2 *mode_cmd,
>                                   const struct drm_framebuffer_funcs *funcs)
> {
>         struct drm_gem_cma_object *cma;
>         struct drm_framebuffer *fb;
>         struct drm_gem_object *gem;
>
>         fb = drm_gem_fb_create_with_funcs(dev, file, mode_cmd, funcs);
>         if (IS_ERR(fb))
>                 return fb;
>
>         gem = drm_gem_fb_get_obj(fb, 0);
>         cma = to_drm_gem_cma_obj(gem);
>
>         if (gem->import_attach)
>                 cma->vaddr = dma_buf_vmap(gem->import_attach->dmabuf);
>
>         if (!cma->vaddr) {
>                 DRM_DEV_DEBUG(dev->dev, "Failed to vmap buffer\n");
>                 drm_gem_fb_destroy(fb);
>                 return ERR_PTR(-ENOMEM);
>         }
>
>         return fb;
> }
>
> void drm_cma_fb_destroy_vunmap(struct drm_framebuffer *fb)
> {
>         struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
>         struct drm_gem_cma_object *cma = to_drm_gem_cma_obj(gem);
>
>         if (gem->import_attach) {
>                 dma_buf_vunmap(gem->import_attach->dmabuf, cma->vaddr);
>                 cma->vaddr = NULL;
>         }
>         drm_gem_fb_destroy(fb);
> }
>
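> A tinydrm driver could then wire these up in its framebuffer funcs, roughly
> like this (just a sketch; tinydrm_fb_funcs and tinydrm_fb_dirty are made-up
> names, the dirty callback is whatever the driver already has):
>
> static const struct drm_framebuffer_funcs tinydrm_fb_funcs = {
>         .destroy        = drm_cma_fb_destroy_vunmap,
>         .create_handle  = drm_gem_fb_create_handle,
>         .dirty          = tinydrm_fb_dirty,
> };
>
> static struct drm_framebuffer *
> tinydrm_fb_create(struct drm_device *dev, struct drm_file *file,
>                   const struct drm_mode_fb_cmd2 *mode_cmd)
> {
>         return drm_cma_fb_create_vmap_with_funcs(dev, file, mode_cmd,
>                                                  &tinydrm_fb_funcs);
> }
>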
>
> This approach means that I would have to do the same for the future shmem
> helper as well. If I add a vaddr to the gem object, I could create a
> common pair of functions in the gem fb helper that can be used regardless
> of the underlying buffer:
>
> struct drm_gem_object {
>         void *vaddr;
> };
>
> struct drm_framebuffer *
> drm_gem_fb_create_vmap_with_funcs(struct drm_device *dev, struct drm_file *file,
>                                   const struct drm_mode_fb_cmd2 *mode_cmd,
>                                   const struct drm_framebuffer_funcs *funcs)
> {
>         struct drm_framebuffer *fb;
>         struct drm_gem_object *obj;
>         void *vaddr;
>
>         fb = drm_gem_fb_create_with_funcs(dev, file, mode_cmd, funcs);
>         if (IS_ERR(fb))
>                 return fb;
>
>         obj = drm_gem_fb_get_obj(fb, 0);
>
>         if (obj->import_attach)
>                 vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
>         else
>                 vaddr = drm_gem_vmap(obj);
>
>         if (!vaddr || IS_ERR(vaddr)) {
>                 DRM_DEV_DEBUG(dev->dev, "Failed to vmap buffer\n");
>                 drm_gem_fb_destroy(fb);
>                 return ERR_PTR(-ENOMEM);
>         }
>
>         obj->vaddr = vaddr;
>
>         return fb;
> }
>
> void drm_gem_fb_destroy_vunmap(struct drm_framebuffer *fb)
> {
>         struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, 0);
>
>         if (obj->import_attach) {
>                 dma_buf_vunmap(obj->import_attach->dmabuf, obj->vaddr);
>                 obj->vaddr = NULL;
>         } else {
>                 drm_gem_vunmap(obj, obj->vaddr);
>         }
>         drm_gem_fb_destroy(fb);
> }
>
>
> It is also possible to take this a bit further and move the dma_buf_vmap()
> call into drm_gem_vmap() and let it provide an address for imported
> buffers as well:
>
> struct drm_gem_object {
>         struct mutex vmap_lock;
>         unsigned int vmap_use_count;
>         void *vaddr;
> };
>
> void *drm_gem_vmap(struct drm_gem_object *obj)
> {
>         void *vaddr;
>         int ret;
>
>         ret = mutex_lock_interruptible(&obj->vmap_lock);
>         if (ret)
>                 return ERR_PTR(ret);
>
>         if (obj->vmap_use_count++ > 0) {
>                 vaddr = obj->vaddr;
>                 goto out_unlock;
>         }
>
>         if (obj->import_attach)
>                 vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
>         else if (obj->funcs && obj->funcs->vmap)
>                 vaddr = obj->funcs->vmap(obj);
>         else if (obj->dev->driver->gem_prime_vmap)
>                 vaddr = obj->dev->driver->gem_prime_vmap(obj);
>         else
>                 vaddr = ERR_PTR(-EOPNOTSUPP);
>
>         if (!vaddr)
>                 vaddr = ERR_PTR(-ENOMEM);
>         if (IS_ERR(vaddr)) {
>                 obj->vmap_use_count = 0;
>                 goto out_unlock;
>         }
>
>         obj->vaddr = vaddr;
> out_unlock:
>         mutex_unlock(&obj->vmap_lock);
>
>         return vaddr;
> }
>
> void drm_gem_vunmap(struct drm_gem_object *obj)
> {
>         mutex_lock(&obj->vmap_lock);
>
>         if (WARN_ON_ONCE(!obj->vmap_use_count))
>                 goto out_unlock;
>
>         if (--obj->vmap_use_count > 0)
>                 goto out_unlock;
>
>         if (obj->import_attach)
>                 dma_buf_vunmap(obj->import_attach->dmabuf, obj->vaddr);
>         else if (obj->funcs && obj->funcs->vunmap)
>                 obj->funcs->vunmap(obj, obj->vaddr);
>         else if (obj->dev->driver->gem_prime_vunmap)
>                 obj->dev->driver->gem_prime_vunmap(obj, obj->vaddr);
> out_unlock:
>         mutex_unlock(&obj->vmap_lock);
> }
>
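> With the import handling folded into drm_gem_vmap()/drm_gem_vunmap() like
> this, the gem fb helper pair above would lose the import_attach special case
> entirely. Something like this (sketch):
>
> struct drm_framebuffer *
> drm_gem_fb_create_vmap_with_funcs(struct drm_device *dev, struct drm_file *file,
>                                   const struct drm_mode_fb_cmd2 *mode_cmd,
>                                   const struct drm_framebuffer_funcs *funcs)
> {
>         struct drm_framebuffer *fb;
>         void *vaddr;
>
>         fb = drm_gem_fb_create_with_funcs(dev, file, mode_cmd, funcs);
>         if (IS_ERR(fb))
>                 return fb;
>
>         /* drm_gem_vmap() now also handles imported buffers */
>         vaddr = drm_gem_vmap(drm_gem_fb_get_obj(fb, 0));
>         if (IS_ERR(vaddr)) {
>                 DRM_DEV_DEBUG(dev->dev, "Failed to vmap buffer\n");
>                 drm_gem_fb_destroy(fb);
>                 return ERR_CAST(vaddr);
>         }
>
>         return fb;
> }
>
> void drm_gem_fb_destroy_vunmap(struct drm_framebuffer *fb)
> {
>         drm_gem_vunmap(drm_gem_fb_get_obj(fb, 0));
>         drm_gem_fb_destroy(fb);
> }
>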
>
> What do you think?
For the reasons you've pointed out in your other mail, vmap is
complicated, and even more so when you want to mix it with buffer
sharing. On reasonably modern architectures (anything ppc, x86 and armv8+)
it just amounts to getting the cache flushing right, but on older arm you
have all that fun rmk laid out with incompatible mappings and stuff.
On top of that we have subsystems that kinda muddle on, like spi, so no
clear indication of what would be best from those either.

I honestly have no idea what the best approach is.

Your draft should work, but we might encode vmap expectations that just
don't really work in general.
-Daniel
>
> Noralf.
>
> > drivers/gpu/drm/drm_client.c | 12 ++--
> > drivers/gpu/drm/drm_gem.c | 109 ++++++++++++++++++++++++++++++++---
> > drivers/gpu/drm/drm_prime.c | 34 ++++++-----
> > include/drm/drm_gem.h | 131 +++++++++++++++++++++++++++++++++++++++++++
> > 4 files changed, 252 insertions(+), 34 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
> > index 17d9a64e885e..eca7331762e4 100644
> > --- a/drivers/gpu/drm/drm_client.c
> > +++ b/drivers/gpu/drm/drm_client.c
> > @@ -80,8 +80,7 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
> > {
> > int ret;
> > - if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
> > - !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
> > + if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
> > return -EOPNOTSUPP;
> > if (funcs && !try_module_get(funcs->owner))
> > @@ -212,8 +211,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
> > {
> > struct drm_device *dev = buffer->client->dev;
> > - if (buffer->vaddr && dev->driver->gem_prime_vunmap)
> > - dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
> > + drm_gem_vunmap(buffer->gem, buffer->vaddr);
> > if (buffer->gem)
> > drm_gem_object_put_unlocked(buffer->gem);
> > @@ -266,9 +264,9 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
> > * fd_install step out of the driver backend hooks, to make that
> > * final step optional for internal users.
> > */
> > - vaddr = dev->driver->gem_prime_vmap(obj);
> > - if (!vaddr) {
> > - ret = -ENOMEM;
> > + vaddr = drm_gem_vmap(obj);
> > + if (IS_ERR(vaddr)) {
> > + ret = PTR_ERR(vaddr);
> > goto err_delete;
> > }
> > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > index 512078ebd97b..8b55ece97967 100644
> > --- a/drivers/gpu/drm/drm_gem.c
> > +++ b/drivers/gpu/drm/drm_gem.c
> > @@ -257,7 +257,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
> > struct drm_gem_object *obj = ptr;
> > struct drm_device *dev = obj->dev;
> > - if (dev->driver->gem_close_object)
> > + if (obj->funcs && obj->funcs->close)
> > + obj->funcs->close(obj, file_priv);
> > + else if (dev->driver->gem_close_object)
> > dev->driver->gem_close_object(obj, file_priv);
> > if (drm_core_check_feature(dev, DRIVER_PRIME))
> > @@ -410,7 +412,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
> > if (ret)
> > goto err_remove;
> > - if (dev->driver->gem_open_object) {
> > + if (obj->funcs && obj->funcs->open) {
> > + ret = obj->funcs->open(obj, file_priv);
> > + if (ret)
> > + goto err_revoke;
> > + } else if (dev->driver->gem_open_object) {
> > ret = dev->driver->gem_open_object(obj, file_priv);
> > if (ret)
> > goto err_revoke;
> > @@ -835,7 +841,9 @@ drm_gem_object_free(struct kref *kref)
> > container_of(kref, struct drm_gem_object, refcount);
> > struct drm_device *dev = obj->dev;
> > - if (dev->driver->gem_free_object_unlocked) {
> > + if (obj->funcs) {
> > + obj->funcs->free(obj);
> > + } else if (dev->driver->gem_free_object_unlocked) {
> > dev->driver->gem_free_object_unlocked(obj);
> > } else if (dev->driver->gem_free_object) {
> > WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> > @@ -864,13 +872,13 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
> > dev = obj->dev;
> > - if (dev->driver->gem_free_object_unlocked) {
> > - kref_put(&obj->refcount, drm_gem_object_free);
> > - } else {
> > + if (dev->driver->gem_free_object) {
> > might_lock(&dev->struct_mutex);
> > if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
> > &dev->struct_mutex))
> > mutex_unlock(&dev->struct_mutex);
> > + } else {
> > + kref_put(&obj->refcount, drm_gem_object_free);
> > }
> > }
> > EXPORT_SYMBOL(drm_gem_object_put_unlocked);
> > @@ -960,11 +968,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
> > if (obj_size < vma->vm_end - vma->vm_start)
> > return -EINVAL;
> > - if (!dev->driver->gem_vm_ops)
> > + if (obj->funcs && obj->funcs->vm_ops)
> > + vma->vm_ops = obj->funcs->vm_ops;
> > + else if (dev->driver->gem_vm_ops)
> > + vma->vm_ops = dev->driver->gem_vm_ops;
> > + else
> > return -EINVAL;
> > vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
> > - vma->vm_ops = dev->driver->gem_vm_ops;
> > vma->vm_private_data = obj;
> > vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> > vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
> > @@ -1066,6 +1077,86 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
> > drm_printf_indent(p, indent, "imported=%s\n",
> > obj->import_attach ? "yes" : "no");
> > - if (obj->dev->driver->gem_print_info)
> > + if (obj->funcs && obj->funcs->print_info)
> > + obj->funcs->print_info(p, indent, obj);
> > + else if (obj->dev->driver->gem_print_info)
> > obj->dev->driver->gem_print_info(p, indent, obj);
> > }
> > +
> > +/**
> > + * drm_gem_pin - Pin backing buffer in memory
> > + * @obj: GEM object
> > + *
> > + * Make sure the backing buffer is pinned in memory.
> > + *
> > + * Returns:
> > + * 0 on success or a negative error code on failure.
> > + */
> > +int drm_gem_pin(struct drm_gem_object *obj)
> > +{
> > + if (obj->funcs && obj->funcs->pin)
> > + return obj->funcs->pin(obj);
> > + else if (obj->dev->driver->gem_prime_pin)
> > + return obj->dev->driver->gem_prime_pin(obj);
> > + else
> > + return 0;
> > +}
> > +EXPORT_SYMBOL(drm_gem_pin);
> > +
> > +/**
> > + * drm_gem_unpin - Unpin backing buffer from memory
> > + * @obj: GEM object
> > + *
> > + * Relax the requirement that the backing buffer is pinned in memory.
> > + */
> > +void drm_gem_unpin(struct drm_gem_object *obj)
> > +{
> > + if (obj->funcs && obj->funcs->unpin)
> > + obj->funcs->unpin(obj);
> > + else if (obj->dev->driver->gem_prime_unpin)
> > + obj->dev->driver->gem_prime_unpin(obj);
> > +}
> > +EXPORT_SYMBOL(drm_gem_unpin);
> > +
> > +/**
> > + * drm_gem_vmap - Map buffer into kernel virtual address space
> > + * @obj: GEM object
> > + *
> > + * Returns:
> > + * A virtual pointer to a newly created GEM object or an ERR_PTR-encoded negative
> > + * error code on failure.
> > + */
> > +void *drm_gem_vmap(struct drm_gem_object *obj)
> > +{
> > + void *vaddr;
> > +
> > + if (obj->funcs && obj->funcs->vmap)
> > + vaddr = obj->funcs->vmap(obj);
> > + else if (obj->dev->driver->gem_prime_vmap)
> > + vaddr = obj->dev->driver->gem_prime_vmap(obj);
> > + else
> > + vaddr = ERR_PTR(-EOPNOTSUPP);
> > +
> > + if (!vaddr)
> > + vaddr = ERR_PTR(-ENOMEM);
> > +
> > + return vaddr;
> > +}
> > +EXPORT_SYMBOL(drm_gem_vmap);
> > +
> > +/**
> > + * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
> > + * @obj: GEM object
> > + * @vaddr: Virtual address (can be NULL)
> > + */
> > +void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
> > +{
> > + if (!vaddr)
> > + return;
> > +
> > + if (obj->funcs && obj->funcs->vunmap)
> > + obj->funcs->vunmap(obj, vaddr);
> > + else if (obj->dev->driver->gem_prime_vunmap)
> > + obj->dev->driver->gem_prime_vunmap(obj, vaddr);
> > +}
> > +EXPORT_SYMBOL(drm_gem_vunmap);
> > diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
> > index 42abf98c1d4a..e0ab5213efa7 100644
> > --- a/drivers/gpu/drm/drm_prime.c
> > +++ b/drivers/gpu/drm/drm_prime.c
> > @@ -199,7 +199,6 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
> > {
> > struct drm_prime_attachment *prime_attach;
> > struct drm_gem_object *obj = dma_buf->priv;
> > - struct drm_device *dev = obj->dev;
> > prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
> > if (!prime_attach)
> > @@ -208,10 +207,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
> > prime_attach->dir = DMA_NONE;
> > attach->priv = prime_attach;
> > - if (!dev->driver->gem_prime_pin)
> > - return 0;
> > -
> > - return dev->driver->gem_prime_pin(obj);
> > + return drm_gem_pin(obj);
> > }
> > EXPORT_SYMBOL(drm_gem_map_attach);
> > @@ -228,7 +224,6 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
> > {
> > struct drm_prime_attachment *prime_attach = attach->priv;
> > struct drm_gem_object *obj = dma_buf->priv;
> > - struct drm_device *dev = obj->dev;
> > if (prime_attach) {
> > struct sg_table *sgt = prime_attach->sgt;
> > @@ -247,8 +242,7 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
> > attach->priv = NULL;
> > }
> > - if (dev->driver->gem_prime_unpin)
> > - dev->driver->gem_prime_unpin(obj);
> > + drm_gem_unpin(obj);
> > }
> > EXPORT_SYMBOL(drm_gem_map_detach);
> > @@ -310,7 +304,10 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
> > if (WARN_ON(prime_attach->dir != DMA_NONE))
> > return ERR_PTR(-EBUSY);
> > - sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
> > + if (obj->funcs)
> > + sgt = obj->funcs->get_sg_table(obj);
> > + else
> > + sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
> > if (!IS_ERR(sgt)) {
> > if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
> > @@ -406,12 +403,13 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
> > void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
> > {
> > struct drm_gem_object *obj = dma_buf->priv;
> > - struct drm_device *dev = obj->dev;
> > + void *vaddr;
> > - if (dev->driver->gem_prime_vmap)
> > - return dev->driver->gem_prime_vmap(obj);
> > - else
> > - return NULL;
> > + vaddr = drm_gem_vmap(obj);
> > + if (IS_ERR(vaddr))
> > + vaddr = NULL;
> > +
> > + return vaddr;
> > }
> > EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
> > @@ -426,10 +424,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
> > void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
> > {
> > struct drm_gem_object *obj = dma_buf->priv;
> > - struct drm_device *dev = obj->dev;
> > - if (dev->driver->gem_prime_vunmap)
> > - dev->driver->gem_prime_vunmap(obj, vaddr);
> > + drm_gem_vunmap(obj, vaddr);
> > }
> > EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
> > @@ -529,7 +525,9 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
> > return dmabuf;
> > }
> > - if (dev->driver->gem_prime_export)
> > + if (obj->funcs && obj->funcs->export)
> > + dmabuf = obj->funcs->export(obj, flags);
> > + else if (dev->driver->gem_prime_export)
> > dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
> > else
> > dmabuf = drm_gem_prime_export(dev, obj, flags);
> > diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
> > index 3583b98a1718..f466ce5bde0e 100644
> > --- a/include/drm/drm_gem.h
> > +++ b/include/drm/drm_gem.h
> > @@ -38,6 +38,121 @@
> > #include <drm/drm_vma_manager.h>
> > +struct drm_gem_object;
> > +
> > +/**
> > + * struct drm_gem_object_funcs - GEM object functions
> > + */
> > +struct drm_gem_object_funcs {
> > + /**
> > + * @free:
> > + *
> > + * Deconstructor for drm_gem_objects.
> > + *
> > + * This callback is mandatory.
> > + */
> > + void (*free)(struct drm_gem_object *obj);
> > +
> > + /**
> > + * @open:
> > + *
> > + * Called upon GEM handle creation.
> > + *
> > + * This callback is optional.
> > + */
> > + int (*open)(struct drm_gem_object *obj, struct drm_file *file);
> > +
> > + /**
> > + * @close:
> > + *
> > + * Called upon GEM handle release.
> > + *
> > + * This callback is optional.
> > + */
> > + void (*close)(struct drm_gem_object *obj, struct drm_file *file);
> > +
> > + /**
> > + * @print_info:
> > + *
> > + * If driver subclasses struct &drm_gem_object, it can implement this
> > + * optional hook for printing additional driver specific info.
> > + *
> > + * drm_printf_indent() should be used in the callback passing it the
> > + * indent argument.
> > + *
> > + * This callback is called from drm_gem_print_info().
> > + *
> > + * This callback is optional.
> > + */
> > + void (*print_info)(struct drm_printer *p, unsigned int indent,
> > + const struct drm_gem_object *obj);
> > +
> > + /**
> > + * @export:
> > + *
> > + * Export backing buffer as a &dma_buf.
> > + * If this is not set drm_gem_prime_export() is used.
> > + *
> > + * This callback is optional.
> > + */
> > + struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);
> > +
> > + /**
> > + * @pin:
> > + *
> > + * Pin backing buffer in memory.
> > + *
> > + * This callback is optional.
> > + */
> > + int (*pin)(struct drm_gem_object *obj);
> > +
> > + /**
> > + * @unpin:
> > + *
> > + * Unpin backing buffer.
> > + *
> > + * This callback is optional.
> > + */
> > + void (*unpin)(struct drm_gem_object *obj);
> > +
> > + /**
> > + * @get_sg_table:
> > + *
> > + * Returns a Scatter-Gather table representation of the buffer.
> > + * Used when exporting a buffer.
> > + *
> > + * This callback is mandatory if buffer export is supported.
> > + */
> > + struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
> > +
> > + /**
> > + * @vmap:
> > + *
> > + * Returns a virtual address for the buffer.
> > + *
> > + * This callback is optional.
> > + */
> > + void *(*vmap)(struct drm_gem_object *obj);
> > +
> > + /**
> > + * @vunmap:
> > + *
> > + * Releases the address previously returned by @vmap.
> > + *
> > + * This callback is optional.
> > + */
> > + void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
> > +
> > + /**
> > + * @vm_ops:
> > + *
> > + * Virtual memory operations used with mmap.
> > + *
> > + * This is optional but necessary for mmap support.
> > + */
> > + const struct vm_operations_struct *vm_ops;
> > +};
> > +
> > /**
> > * struct drm_gem_object - GEM buffer object
> > *
> > @@ -146,6 +261,17 @@ struct drm_gem_object {
> > * simply leave it as NULL.
> > */
> > struct dma_buf_attachment *import_attach;
> > +
> > + /**
> > + * @funcs:
> > + *
> > + * Optional GEM object functions. If this is set, it will be used instead of the
> > + * corresponding &drm_driver GEM callbacks.
> > + *
> > + * New drivers should use this.
> > + *
> > + */
> > + const struct drm_gem_object_funcs *funcs;
> > };
> > /**
> > @@ -293,4 +419,9 @@ int drm_gem_dumb_destroy(struct drm_file *file,
> > struct drm_device *dev,
> > uint32_t handle);
> > +int drm_gem_pin(struct drm_gem_object *obj);
> > +void drm_gem_unpin(struct drm_gem_object *obj);
> > +void *drm_gem_vmap(struct drm_gem_object *obj);
> > +void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
> > +
> > #endif /* __DRM_GEM_H__ */
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch