On Mon, Jun 20, 2016 at 1:07 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> Enable the standard GEM dma-buf interface provided by the DRM core, but
> only for exporting the VGEM object. This allows passing around the VGEM
> objects created from the dumb interface and using them as sources
> elsewhere. Creating a VGEM object for a foreign handle is not supported.
>
> v2: With additional completeness.
> v3: Need to clear the CPU cache upon exporting the dma-addresses.
>
> Testcase: igt/vgem_basic/dmabuf-*
> Testcase: igt/prime_vgem
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> ---
>  drivers/gpu/drm/vgem/vgem_drv.c | 112 +++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 111 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index e1a697d0662f..db48e837992d 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -193,14 +193,124 @@ static const struct file_operations vgem_driver_fops = {
>         .release        = drm_release,
>  };
>
> +static void __put_pages(struct page **pages, long n_pages)
> +{
> +       while (n_pages--)
> +               put_page(pages[n_pages]);
> +       drm_free_large(pages);
> +}
> +
> +static int vgem_prime_pin(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct page **pages;
> +
> +       /* Flush the object from the CPU cache so that importers
> +        * can rely on coherent indirect access via the
> +        * exported dma-address.
> +        */
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages))
> +               return PTR_ERR(pages);
> +
> +       drm_clflush_pages(pages, n_pages);
> +       __put_pages(pages, n_pages);
> +
> +       return 0;
> +}
> +
> +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct sg_table *st;
> +       struct page **pages;
> +       int ret;
> +
> +       st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +       if (st == NULL)
> +               return ERR_PTR(-ENOMEM);
> +
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages)) {
> +               ret = PTR_ERR(pages);
> +               goto err;
> +       }
> +
> +       ret = sg_alloc_table_from_pages(st, pages, n_pages,
> +                                       0, obj->size, GFP_KERNEL);
> +       __put_pages(pages, n_pages);
> +       if (ret)
> +               goto err;
> +
> +       return st;
> +
> +err:
> +       kfree(st);
> +       return ERR_PTR(ret);
> +}
> +
> +static void *vgem_prime_vmap(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct page **pages;
> +       void *addr;
> +
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages))
> +               return NULL;
> +
> +       addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
> +       __put_pages(pages, n_pages);
> +
> +       return addr;
> +}
> +
> +static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> +{
> +       vunmap(vaddr);
> +}
> +
> +static int vgem_prime_mmap(struct drm_gem_object *obj,
> +                          struct vm_area_struct *vma)
> +{
> +       int ret;
> +
> +       if (obj->size < vma->vm_end - vma->vm_start)
> +               return -EINVAL;
> +
> +       if (!obj->filp)
> +               return -ENODEV;
> +
> +       ret = obj->filp->f_op->mmap(obj->filp, vma);
> +       if (ret)
> +               return ret;
> +
> +       fput(vma->vm_file);
> +       vma->vm_file = get_file(obj->filp);
> +       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
> +       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +
> +       return 0;
> +}
> +
>  static struct drm_driver vgem_driver = {
> -       .driver_features                = DRIVER_GEM,
> +       .driver_features                = DRIVER_GEM | DRIVER_PRIME,
>         .gem_free_object_unlocked       = vgem_gem_free_object,
>         .gem_vm_ops                     = &vgem_gem_vm_ops,
>         .ioctls                         = vgem_ioctls,
>         .fops                           = &vgem_driver_fops,
> +
>         .dumb_create                    = vgem_gem_dumb_create,
>         .dumb_map_offset                = vgem_gem_dumb_map,
> +
> +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> +       .gem_prime_pin = vgem_prime_pin,
> +       .gem_prime_export = drm_gem_prime_export,
> +       .gem_prime_get_sg_table = vgem_prime_get_sg_table,
> +       .gem_prime_vmap = vgem_prime_vmap,
> +       .gem_prime_vunmap = vgem_prime_vunmap,
> +       .gem_prime_mmap = vgem_prime_mmap,
> +
>         .name   = DRIVER_NAME,
>         .desc   = DRIVER_DESC,
>         .date   = DRIVER_DATE,
> --
> 2.8.1
>

Acked-by: Zach Reizner <zachr@google.com>
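
Not part of the review, but for anyone wanting to poke at the new export
path from userspace: a minimal sketch might look roughly like the below.
It assumes libdrm, a vgem primary node at /dev/dri/card0, and an arbitrary
1024x1024x32 dumb buffer; error handling is trimmed. The igt tests named
in the commit message are the real coverage.

/* Hypothetical example, not taken from the patch: create a dumb buffer
 * on vgem and export it as a dma-buf fd via PRIME.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>        /* drmIoctl(), drmPrimeHandleToFD() */

int main(void)
{
	struct drm_mode_create_dumb create;
	int fd, dmabuf = -1;

	fd = open("/dev/dri/card0", O_RDWR);    /* adjust to the vgem node */
	if (fd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.width = 1024;
	create.height = 1024;
	create.bpp = 32;
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* With this patch applied, the dumb-buffer handle can be exported;
	 * the resulting fd can then be handed to another driver or process
	 * as a read source. Importing foreign handles into vgem stays
	 * unsupported. */
	if (drmPrimeHandleToFD(fd, create.handle, DRM_CLOEXEC, &dmabuf))
		return 1;

	printf("dma-buf fd = %d, size = %llu\n",
	       dmabuf, (unsigned long long)create.size);

	close(dmabuf);
	close(fd);
	return 0;
}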