[PATCH v2] drm/vgem: Enable dmabuf interface for export

Chris Wilson chris at chris-wilson.co.uk
Sun Jun 19 06:18:49 UTC 2016


Enable the standard GEM dma-buf interface provided by the DRM core, but
only for exporting the VGEM object. This allows VGEM objects created
through the dumb interface to be passed around and used as sources
elsewhere. Creating a VGEM object from a foreign handle (i.e. import)
is not supported.

Testcase: igt/vgem_basic/dmabuf-mmap
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
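Note for reviewers: below is a rough userspace sketch of the export flow
this enables. It is illustrative only and not part of the patch; the
/dev/dri/card0 path, the 1024x1024x32 dumb buffer and the use of libdrm's
drmPrimeHandleToFD() with DRM_CLOEXEC | DRM_RDWR are assumptions made for
the example, not requirements of the interface.

/* Illustrative only: create a dumb vgem buffer, export it as a dma-buf
 * and mmap the exported fd (roughly what igt/vgem_basic/dmabuf-mmap
 * exercises).  Build with: gcc t.c $(pkg-config --cflags --libs libdrm)
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <xf86drm.h>

int main(void)
{
        /* Assumption: the vgem node happens to be card0; real code would
         * walk /dev/dri/ and check drmGetVersion()->name for "vgem".
         */
        int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
        struct drm_mode_create_dumb create = {
                .width = 1024, .height = 1024, .bpp = 32,
        };
        int dmabuf;
        void *ptr;

        if (fd < 0 || drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return 1;

        /* Export only; DRM_RDWR (needs a recent enough drm.h) asks for a
         * writable dma-buf so the mmap below can use PROT_WRITE.
         */
        if (drmPrimeHandleToFD(fd, create.handle,
                               DRM_CLOEXEC | DRM_RDWR, &dmabuf))
                return 1;

        ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, dmabuf, 0);
        if (ptr == MAP_FAILED)
                return 1;

        memset(ptr, 0xff, create.size); /* write through the dma-buf mapping */

        munmap(ptr, create.size);
        close(dmabuf);
        close(fd);
        return 0;
}

Since only .prime_handle_to_fd/.gem_prime_export are wired up and no import
hooks are provided, the reverse PRIME fd-to-handle path on vgem should keep
returning -ENOSYS, matching the changelog above.
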
 drivers/gpu/drm/vgem/vgem_drv.c | 101 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 100 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 4747b7f98e7a..32e2f51ed55f 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -193,14 +193,113 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct address_space *mapping = file_inode(obj->filp)->i_mapping;
+	long n_pages = obj->size >> PAGE_SHIFT, i;
+	struct sg_table *st;
+	struct scatterlist *sg;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (sg_alloc_table(st, n_pages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	for (i = 0; i < n_pages; i++) {
+		struct page *page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+			sg_mark_end(sg);
+			goto err_unwind;
+		}
+
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		sg = sg_next(sg);
+	}
+
+	return st;
+
+err_unwind:
+	for (sg = st->sgl; sg && sg_page(sg); sg = sg_next(sg))
+		put_page(sg_page(sg));
+	sg_free_table(st);
+	kfree(st);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct address_space *mapping = file_inode(obj->filp)->i_mapping;
+	long n_pages = obj->size >> PAGE_SHIFT, i;
+	struct page **pages;
+	void *addr = NULL;
+
+	pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+	if (!pages)
+		return NULL;
+
+	for (i = 0; i < n_pages; i++) {
+		pages[i] = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(pages[i]))
+			goto out;
+	}
+
+	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
+out:
+	while (i--)
+		put_page(pages[i]);
+	drm_free_large(pages);
+
+	return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
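+	/*
+	 * The shmem vm_ops installed by ->mmap() above expect vma->vm_file
+	 * to be the shmem file, not the DRM device file, so swap it over.
+	 */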
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
 	.fops				= &vgem_driver_fops,
+
 	.dumb_create			= vgem_gem_dumb_create,
 	.dumb_map_offset		= vgem_gem_dumb_map,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
+	.gem_prime_vmap = vgem_prime_vmap,
+	.gem_prime_vunmap = vgem_prime_vunmap,
+	.gem_prime_mmap = vgem_prime_mmap,
+
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,
-- 
2.8.1


