[PATCH 5/5] drm/vgem: properly implement mmap

Ben Widawsky ben at bwidawsk.net
Wed Feb 8 15:19:31 PST 2012


Mostly copied from i915 gtt mmaps, this will properly fault in pages as
the user tries to use them. The only thing of note is that no
prefaulting occurs, so perhaps some kind of madvise will happen later if
needed.

The only other thing missing right now is shrinker support, which will
come next after I figure out if locking is actually required.
Hmm, and now that I think about it, mremap and munmap may not work
either.

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/vgem/vgem_drv.c |  105 ++++++++++++++++++++++++++++++++++++--
 1 files changed, 99 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 7a7a05f..16f88ee 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -33,6 +33,7 @@
 #include "vgem_drm.h"
 #include <linux/module.h>
 #include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
 
 #define DRIVER_NAME	"vgem"
 #define DRIVER_DESC	"Virtual GEM provider"
@@ -40,8 +41,11 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
+#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
+
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
+	struct page **pages;
 };
 
 static int vgem_load(struct drm_device *dev, unsigned long flags)
@@ -67,21 +71,109 @@ static int vgem_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
+static void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
+{
+	int num_pages = obj->base.size / PAGE_SIZE;
+	int i;
+
+	for (i = 0; i < num_pages; i++) {
+		page_cache_release(obj->pages[i]);
+	}
+
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+}
+
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
-	if (obj->map_list.map)
+	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
+
+	if (obj)
 		drm_gem_free_mmap_offset(obj);
 
 	drm_gem_object_release(obj);
+
+	if (vgem_obj->pages)
+		vgem_gem_put_pages(vgem_obj);
+
+	kfree(vgem_obj);
+}
+
+static int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
+{
+	struct address_space *mapping;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+	int num_pages, i, ret = 0;
+
+	num_pages = obj->base.size / PAGE_SIZE;
+
+	if (!obj->pages) {
+		obj->pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+		if (obj->pages == NULL)
+			return -ENOMEM;
+	}
+
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	if (WARN_ON(mapping == NULL))
+		return VM_FAULT_SIGBUS;
+
+	for (i = 0; i < num_pages; i++) {
+		struct page *page;
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto err_out;
+		}
+		obj->pages[i] = page;
+	}
+
+	return ret;
+
+err_out:
+	while (i--)
+		page_cache_release(obj->pages[i]);
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+	return ret;
 }
 
-/* XXX I don't think this is ever hit */
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	BUG();
+	struct drm_vgem_gem_object *obj = to_vgem_bo(vma->vm_private_data);
+	loff_t num_pages;
+	pgoff_t page_offset;
+	int ret;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	num_pages = obj->base.size / PAGE_SIZE;
+
+	if (WARN_ON(page_offset > num_pages))
+		return VM_FAULT_SIGBUS;
+
+	ret = vgem_gem_get_pages(obj);
+	if (ret)
+		return ret;
+
+	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+			     obj->pages[page_offset]);
+
+	/* Pretty dumb handler for now */
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
 }
 
-static struct vm_operations_struct vgem_gem_vm_ops = {
+static const struct vm_operations_struct vgem_gem_vm_ops = {
 	.fault = vgem_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
@@ -150,7 +242,6 @@ static int vgem_gem_mmap_ioctl(struct drm_device *dev, void *data,
 {
 	struct vgem_gem_mmap *args = data;
 	struct drm_gem_object *obj;
-	uintptr_t addr;
 
 	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (!obj)
@@ -160,7 +251,9 @@ static int vgem_gem_mmap_ioctl(struct drm_device *dev, void *data,
 
 	BUG_ON(!obj->map_list.map);
 
-	args->mapped = obj->map_list.hash.key << PAGE_SHIFT;
+	args->mapped = (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+
+	drm_gem_object_unreference_unlocked(obj);
 
 	return 0;
 }
-- 
1.7.9



More information about the dri-devel mailing list