[PATCH 2/3] drm/ttm: add object per-file mmap validation

Dave Airlie airlied at gmail.com
Tue Dec 18 20:41:58 PST 2012


From: Dave Airlie <airlied at redhat.com>

So instead of creating a whole set of per-file address spaces, and having
some sort of meltdown in my understanding of the inode/address_space code,
I realised we could just keep a list of the filps that the object has been
attached to, or has had a handle created on.
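
Concretely, each drm_vma_offset_node just grows a list of small per-file
records, managed by three helpers (this is only a condensed summary of the
drm_vma_offset_man changes below, not a separate API):

  struct drm_vma_offset_node_per_file {
          struct list_head lhead;
          struct file *filp;
  };

  /* called from the driver's GEM object open/close hooks */
  int drm_vma_offset_node_add_file(struct drm_vma_offset_node *node,
                                   struct file *filp);
  void drm_vma_offset_node_remove_file(struct drm_vma_offset_node *node,
                                       struct file *filp);

  /* walked at mmap time to see if this filp ever got a handle */
  bool drm_vma_offset_node_valid_file(struct drm_vma_offset_node *node,
                                      struct file *filp);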

At the moment this gives us nothing extra, since you can nearly always guess
the handles and GEM open them, but in the future, where we have fd passing
or flink, this should block other clients from mmapping objects they haven't
been explicitly given a handle for.

This just implements it in TTM for radeon/nouveau; vmwgfx and other drivers
would need updates as well. It might also be nice to try and consolidate
things a bit better.
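
For reference, the driver-side wiring then looks roughly like the sketch
below (it mirrors the radeon/nouveau hunks in this patch; the function
names are placeholders and "bo" stands for whatever TTM buffer object is
embedded in the GEM object):

  static int driver_gem_object_open(struct drm_gem_object *obj,
                                    struct drm_file *file_priv)
  {
          /* remember that this filp was given a handle to the object */
          return drm_vma_offset_node_add_file(&bo->vma_offset,
                                              file_priv->filp);
  }

  static void driver_gem_object_close(struct drm_gem_object *obj,
                                      struct drm_file *file_priv)
  {
          /* handle is gone, drop this filp from the object's list */
          drm_vma_offset_node_remove_file(&bo->vma_offset, file_priv->filp);
  }

The mmap side then rejects the lookup in ttm_bo_vm_lookup() whenever
drm_vma_offset_node_valid_file() says the filp was never given a handle.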

Signed-off-by: Dave Airlie <airlied at redhat.com>
---
 drivers/gpu/drm/drm_vma_offset_man.c  | 43 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nouveau_gem.c |  7 ++++++
 drivers/gpu/drm/radeon/radeon_gem.c   |  7 ++++++
 drivers/gpu/drm/ttm/ttm_bo_vm.c       | 15 +++++++++---
 include/drm/drm_vma_offset_man.h      | 18 ++++++++++++++-
 5 files changed, 86 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_vma_offset_man.c b/drivers/gpu/drm/drm_vma_offset_man.c
index 7456892..ee2f425 100644
--- a/drivers/gpu/drm/drm_vma_offset_man.c
+++ b/drivers/gpu/drm/drm_vma_offset_man.c
@@ -91,6 +91,7 @@ int drm_vma_offset_setup(struct drm_vma_offset_manager *man,
 {
 	int ret;
 
+	INIT_LIST_HEAD(&node->flist);
 retry_pre_get:
 	ret = drm_mm_pre_get(&man->addr_space_mm);
 	if (unlikely(ret != 0))
@@ -158,3 +159,45 @@ void drm_vma_offset_man_fini(struct drm_vma_offset_manager *man)
 	write_unlock(&man->vm_lock);
 }
 EXPORT_SYMBOL(drm_vma_offset_man_fini);
+
+int drm_vma_offset_node_add_file(struct drm_vma_offset_node *node,
+				 struct file *filp)
+{
+	struct drm_vma_offset_node_per_file *fnode;
+
+	fnode = kmalloc(sizeof(*fnode), GFP_KERNEL);
+	if (!fnode)
+		return -ENOMEM;
+
+	fnode->filp = filp;
+	list_add(&fnode->lhead, &node->flist);
+	return 0;
+}
+EXPORT_SYMBOL(drm_vma_offset_node_add_file);
+
+void drm_vma_offset_node_remove_file(struct drm_vma_offset_node *node,
+			       struct file *filp)
+{
+	struct drm_vma_offset_node_per_file *fnode, *temp;
+
+	list_for_each_entry_safe(fnode, temp, &node->flist, lhead) {
+		if (fnode->filp == filp) {
+			list_del(&fnode->lhead);
+			kfree(fnode);
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_vma_offset_node_remove_file);
+
+bool drm_vma_offset_node_valid_file(struct drm_vma_offset_node *node,
+				    struct file *filp)
+{
+	struct drm_vma_offset_node_per_file *fnode;
+	list_for_each_entry(fnode, &node->flist, lhead) {
+		if (fnode->filp == filp)
+			return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(drm_vma_offset_node_valid_file);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6be9249..8281f5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -74,6 +74,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_vma *vma;
 	int ret;
 
+	ret = drm_vma_offset_node_add_file(&nvbo->bo.vma_offset,
+					   file_priv->filp);
+	if (ret)
+		return ret;
+
 	if (!cli->base.vm)
 		return 0;
 
@@ -111,6 +116,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_vma *vma;
 	int ret;
 
+	drm_vma_offset_node_remove_file(&nvbo->bo.vma_offset, file_priv->filp);
+
 	if (!cli->base.vm)
 		return;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe5c1f6..daba965 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -146,6 +146,12 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct radeon_bo_va *bo_va;
 	int r;
 
+	/* track this filp against the bo so mmap can validate it later */
+	r = drm_vma_offset_node_add_file(&rbo->tbo.vma_offset,
+					 file_priv->filp);
+	if (r)
+		return r;
+
 	if (rdev->family < CHIP_CAYMAN) {
 		return 0;
 	}
@@ -176,6 +182,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_bo_va *bo_va;
 	int r;
 
+	drm_vma_offset_node_remove_file(&rbo->tbo.vma_offset, file_priv->filp);
 	if (rdev->family < CHIP_CAYMAN) {
 		return;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3e52e25..d111d3d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -218,7 +218,8 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.close = ttm_bo_vm_close
 };
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct file *filp,
+						  struct ttm_bo_device *bdev,
 						  unsigned long dev_offset,
 						  unsigned long num_pages)
 {
@@ -236,10 +237,18 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
 	bo = container_of(node, struct ttm_buffer_object, vma_offset);
 	ttm_bo_reference(bo);
 
+	if (!drm_vma_offset_node_valid_file(&bo->vma_offset, filp)) {
+		bo = NULL;
+		pr_err("Invalid buffer object for this file descriptor\n");
+		goto out;
+	}
+
 	if (!kref_get_unless_zero(&bo->kref)) {
 		bo = NULL;
 		pr_err("Could not find buffer object to map\n");
 	}
+
+out:
 	read_unlock(&bdev->vm_lock);
 	return bo;
 }
@@ -251,7 +260,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	struct ttm_buffer_object *bo;
 	int ret;
 
-	bo = ttm_bo_vm_lookup(bdev,
+	bo = ttm_bo_vm_lookup(filp, bdev,
 			      vma->vm_pgoff,
 			      (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
 	if (unlikely(bo == NULL))
@@ -313,7 +322,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	bool no_wait = false;
 	bool dummy;
 
-	bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
+	bo = ttm_bo_vm_lookup(filp, bdev, dev_offset, 1);
 	if (unlikely(bo == NULL))
 		return -EFAULT;
 
diff --git a/include/drm/drm_vma_offset_man.h b/include/drm/drm_vma_offset_man.h
index b8ef845..6a297f2 100644
--- a/include/drm/drm_vma_offset_man.h
+++ b/include/drm/drm_vma_offset_man.h
@@ -2,13 +2,23 @@
 #ifndef DRM_VMA_OFFSET_MAN
 #define DRM_VMA_OFFSET_MAN
 
+#include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <drm/drm_mm.h>
 
-struct drm_mm_node;
+/* store a linked list of the file privs this object is valid on,
+   then check it at mmap time */
 
+struct drm_vma_offset_node_per_file {
+	struct list_head lhead;
+	struct file *filp;
+};
+
+/* each offset node keeps the list of file privs it is valid for */
 struct drm_vma_offset_node {
+	struct list_head flist;
+
 	struct drm_mm_node *vm_node;
 	struct rb_node vm_rb;
 	uint64_t num_pages;
@@ -58,4 +68,10 @@ static inline uint64_t drm_vma_node_offset_addr(struct drm_vma_offset_node *node
 	return ((uint64_t) node->vm_node->start) << PAGE_SHIFT;
 }
 
+int drm_vma_offset_node_add_file(struct drm_vma_offset_node *node,
+				    struct file *filp);
+void drm_vma_offset_node_remove_file(struct drm_vma_offset_node *node,
+				     struct file *filp);
+bool drm_vma_offset_node_valid_file(struct drm_vma_offset_node *node,
+				    struct file *filp);
 #endif
-- 
1.8.0.2
