<div dir="ltr"><div class="gmail_quote"><div dir="ltr">On Mon, Jun 20, 2016 at 1:07 PM Chris Wilson <<a href="mailto:chris@chris-wilson.co.uk">chris@chris-wilson.co.uk</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Enable the standard GEM dma-buf interface provided by the DRM core, but<br>
only for exporting the VGEM object. This allows passing around the VGEM<br>
objects created from the dumb interface and using them as sources<br>
elsewhere. Creating a VGEM object for a foriegn handle is not supported.<br>
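
For anyone wanting to poke at this from userspace, here is a rough sketch of
the export path this enables, written directly against the DRM uapi ioctls
(untested; assumes /dev/dri/card0 is the vgem node, skips most error
handling, and the uapi header path may differ on your system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int main(void)
    {
            struct drm_mode_create_dumb create = {
                    .width = 1024, .height = 1024, .bpp = 32,
            };
            struct drm_prime_handle prime = {
                    /* DRM_RDWR so the fd can later be mapped writable */
                    .flags = DRM_CLOEXEC | DRM_RDWR,
            };
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;

            /* Allocate a vgem object through the dumb-buffer interface. */
            if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return 1;

            /* Export it as a dma-buf fd: the path this patch adds. */
            prime.handle = create.handle;
            if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime))
                    return 1;

            printf("vgem bo exported as dma-buf fd %d (%llu bytes)\n",
                   prime.fd, (unsigned long long)create.size);
            return 0;
    }
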
>
> v2: With additional completeness.
> v3: Need to clear the CPU cache upon exporting the dma-addresses.
>
> Testcase: igt/vgem_basic/dmabuf-*
> Testcase: igt/prime_vgem
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> ---
>  drivers/gpu/drm/vgem/vgem_drv.c | 112 +++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 111 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index e1a697d0662f..db48e837992d 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -193,14 +193,124 @@ static const struct file_operations vgem_driver_fops = {
>          .release        = drm_release,
>  };
>
> +static void __put_pages(struct page **pages, long n_pages)
> +{
> +        while (n_pages--)
> +                put_page(pages[n_pages]);
> +        drm_free_large(pages);
> +}
> +
> +static int vgem_prime_pin(struct drm_gem_object *obj)
> +{
> +        long n_pages = obj->size >> PAGE_SHIFT;
> +        struct page **pages;
> +
> +        /* Flush the object from the CPU cache so that importers
> +         * can rely on coherent indirect access via the
> +         * exported dma-address.
> +         */
> +        pages = drm_gem_get_pages(obj);
> +        if (IS_ERR(pages))
> +                return PTR_ERR(pages);
> +
> +        drm_clflush_pages(pages, n_pages);
> +        __put_pages(pages, n_pages);
> +
> +        return 0;
> +}
> +
> +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> +{
> +        long n_pages = obj->size >> PAGE_SHIFT;
> +        struct sg_table *st;
> +        struct page **pages;
> +        int ret;
> +
> +        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +        if (st == NULL)
> +                return ERR_PTR(-ENOMEM);
> +
> +        pages = drm_gem_get_pages(obj);
> +        if (IS_ERR(pages)) {
> +                ret = PTR_ERR(pages);
> +                goto err;
> +        }
> +
> +        ret = sg_alloc_table_from_pages(st, pages, n_pages,
> +                                        0, obj->size, GFP_KERNEL);
> +        __put_pages(pages, n_pages);
> +        if (ret)
> +                goto err;
> +
> +        return st;
> +
> +err:
> +        kfree(st);
> +        return ERR_PTR(ret);
> +}
> +
> +static void *vgem_prime_vmap(struct drm_gem_object *obj)
> +{
> +        long n_pages = obj->size >> PAGE_SHIFT;
> +        struct page **pages;
> +        void *addr;
> +
> +        pages = drm_gem_get_pages(obj);
> +        if (IS_ERR(pages))
> +                return NULL;
> +
> +        addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
> +        __put_pages(pages, n_pages);
> +
> +        return addr;
> +}
> +
> +static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> +{
> +        vunmap(vaddr);
> +}
> +
> +static int vgem_prime_mmap(struct drm_gem_object *obj,
> +                           struct vm_area_struct *vma)
> +{
> +        int ret;
> +
> +        if (obj->size < vma->vm_end - vma->vm_start)
> +                return -EINVAL;
> +
> +        if (!obj->filp)
> +                return -ENODEV;
> +
> +        ret = obj->filp->f_op->mmap(obj->filp, vma);
> +        if (ret)
> +                return ret;
> +
> +        fput(vma->vm_file);
> +        vma->vm_file = get_file(obj->filp);
> +        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
> +        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +
> +        return 0;
> +}
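
With .gem_prime_mmap wired up like this, the exported fd can also be mapped
directly for CPU access. A minimal sketch, assuming the dma-buf was exported
with DRM_RDWR (otherwise only a read-only mapping will succeed); map_dmabuf
is just an illustrative helper name:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Map an exported vgem dma-buf for CPU access; dmabuf_fd and size come
     * from the export step (prime.fd and create.size in the earlier sketch). */
    static void *map_dmabuf(int dmabuf_fd, size_t size)
    {
            void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                             dmabuf_fd, 0);

            return ptr == MAP_FAILED ? NULL : ptr;
    }
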
> +
>  static struct drm_driver vgem_driver = {
> -        .driver_features                = DRIVER_GEM,
> +        .driver_features                = DRIVER_GEM | DRIVER_PRIME,
>          .gem_free_object_unlocked       = vgem_gem_free_object,
>          .gem_vm_ops                     = &vgem_gem_vm_ops,
>          .ioctls                         = vgem_ioctls,
>          .fops                           = &vgem_driver_fops,
> +
>          .dumb_create                    = vgem_gem_dumb_create,
>          .dumb_map_offset                = vgem_gem_dumb_map,
> +
> +        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> +        .gem_prime_pin = vgem_prime_pin,
> +        .gem_prime_export = drm_gem_prime_export,
> +        .gem_prime_get_sg_table = vgem_prime_get_sg_table,
> +        .gem_prime_vmap = vgem_prime_vmap,
> +        .gem_prime_vunmap = vgem_prime_vunmap,
> +        .gem_prime_mmap = vgem_prime_mmap,
> +
>          .name   = DRIVER_NAME,
>          .desc   = DRIVER_DESC,
>          .date   = DRIVER_DATE,
> --
> 2.8.1
>

Acked-by: Zach Reizner <zachr@google.com>