[Intel-gfx] [PATCHv4 3/3] drm/vgem: Enable dmabuf import interfaces

Laura Abbott <labbott@redhat.com>
Thu May 4 18:45:48 UTC 2017


Enable the GEM dma-buf import interfaces in addition to the export
interfaces. This lets vgem be used as a test source for other allocators
(e.g. Ion).

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Laura Abbott <labbott@redhat.com>
---
v4: Use new drm_gem_prime_import_dev function
---
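Not part of the patch, just a note for reviewers trying the series out: a
minimal userspace sketch of the import path this enables, using libdrm's
drmPrimeFDToHandle(). The helper name import_into_vgem is made up for this
note; vgem_fd is assumed to be an open fd on the vgem node and dmabuf_fd a
dma-buf obtained from another exporter (e.g. Ion).

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

/*
 * Illustrative helper (name invented for this note): import an existing
 * dma-buf fd into vgem and return a GEM handle for it.  The DRM fd must
 * stay open for the handle to remain valid.
 */
int import_into_vgem(int vgem_fd, int dmabuf_fd, uint32_t *handle)
{
	/* Goes through the new .prime_fd_to_handle / .gem_prime_import paths. */
	int ret = drmPrimeFDToHandle(vgem_fd, dmabuf_fd, handle);

	if (ret)
		fprintf(stderr, "vgem import failed: %d\n", ret);

	return ret;
}

A caller would locate the vgem node (e.g. by checking drmGetVersion(fd)->name
against "vgem") and pass in a dma-buf fd from another allocator; mmapping the
resulting handle then exercises the new fault path for imported pages.
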
 drivers/gpu/drm/vgem/vgem_drv.c | 136 +++++++++++++++++++++++++++++++---------
 drivers/gpu/drm/vgem/vgem_drv.h |   2 +
 2 files changed, 109 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index d1d98af..c9381d45 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -48,6 +48,11 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
+	drm_free_large(vgem_obj->pages);
+
+	if (obj->import_attach)
+		drm_prime_gem_destroy(obj, vgem_obj->table);
+
 	drm_gem_object_release(obj);
 	kfree(vgem_obj);
 }
@@ -58,26 +63,49 @@ static int vgem_gem_fault(struct vm_fault *vmf)
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
 	unsigned long vaddr = vmf->address;
-	struct page *page;
-
-	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
-				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
-	if (!IS_ERR(page)) {
-		vmf->page = page;
-		return 0;
-	} else switch (PTR_ERR(page)) {
-		case -ENOSPC:
-		case -ENOMEM:
-			return VM_FAULT_OOM;
-		case -EBUSY:
-			return VM_FAULT_RETRY;
-		case -EFAULT:
-		case -EINVAL:
-			return VM_FAULT_SIGBUS;
-		default:
-			WARN_ON_ONCE(PTR_ERR(page));
-			return VM_FAULT_SIGBUS;
+	int ret;
+	loff_t num_pages;
+	pgoff_t page_offset;
+	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+
+	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
+
+	if (page_offset >= num_pages)
+		return VM_FAULT_SIGBUS;
+
+	if (obj->pages) {
+		get_page(obj->pages[page_offset]);
+		vmf->page = obj->pages[page_offset];
+		ret = 0;
+	} else {
+		struct page *page;
+
+		page = shmem_read_mapping_page(
+					file_inode(obj->base.filp)->i_mapping,
+					page_offset);
+		if (!IS_ERR(page)) {
+			vmf->page = page;
+			ret = 0;
+		} else switch (PTR_ERR(page)) {
+			case -ENOSPC:
+			case -ENOMEM:
+				ret = VM_FAULT_OOM;
+				break;
+			case -EBUSY:
+				ret = VM_FAULT_RETRY;
+				break;
+			case -EFAULT:
+			case -EINVAL:
+				ret = VM_FAULT_SIGBUS;
+				break;
+			default:
+				WARN_ON(PTR_ERR(page));
+				ret = VM_FAULT_SIGBUS;
+				break;
+		}
+
 	}
+	return ret;
 }
 
 static const struct vm_operations_struct vgem_gem_vm_ops = {
@@ -114,12 +142,8 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(vfile);
 }
 
-/* ioctls */
-
-static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
-					      struct drm_file *file,
-					      unsigned int *handle,
-					      unsigned long size)
+static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
+						unsigned long size)
 {
 	struct drm_vgem_gem_object *obj;
 	int ret;
@@ -129,8 +153,31 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
-	if (ret)
-		goto err_free;
+	if (ret) {
+		kfree(obj);
+		return ERR_PTR(ret);
+	}
+
+	return obj;
+}
+
+static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
+{
+	drm_gem_object_release(&obj->base);
+	kfree(obj);
+}
+
+static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+					      struct drm_file *file,
+					      unsigned int *handle,
+					      unsigned long size)
+{
+	struct drm_vgem_gem_object *obj;
+	int ret;
+
+	obj = __vgem_gem_create(dev, size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
 
 	ret = drm_gem_handle_create(file, &obj->base, handle);
 	drm_gem_object_unreference_unlocked(&obj->base);
@@ -139,9 +186,8 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 
 	return &obj->base;
 
-err_free:
-	kfree(obj);
 err:
+	__vgem_gem_destroy(obj);
 	return ERR_PTR(ret);
 }
 
@@ -258,6 +304,35 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
 	return st;
 }
 
+static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	return drm_gem_prime_import_dev(dev, dma_buf, &vgem_platform->dev);
+}
+
+static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
+			struct dma_buf_attachment *attach, struct sg_table *sg)
+{
+	struct drm_vgem_gem_object *obj;
+	int npages;
+
+	obj = __vgem_gem_create(dev, attach->dmabuf->size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+
+	obj->table = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!obj->pages) {
+		__vgem_gem_destroy(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
+					npages);
+	return &obj->base;
+}
+
 static void *vgem_prime_vmap(struct drm_gem_object *obj)
 {
 	long n_pages = obj->size >> PAGE_SHIFT;
@@ -316,8 +391,11 @@ static struct drm_driver vgem_driver = {
 	.dumb_map_offset		= vgem_gem_dumb_map,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_pin = vgem_prime_pin,
+	.gem_prime_import = vgem_prime_import,
 	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
 	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
 	.gem_prime_vmap = vgem_prime_vmap,
 	.gem_prime_vunmap = vgem_prime_vunmap,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index cb59c7a..1aae014 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -43,6 +43,8 @@ struct vgem_file {
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
+	struct page **pages;
+	struct sg_table *table;
 };
 
 int vgem_fence_open(struct vgem_file *file);
-- 
2.7.4
