[PATCH 1/3] drm: create unified vma offset manager

Dave Airlie airlied at gmail.com
Tue Dec 18 17:56:18 PST 2012


From: Dave Airlie <airlied at redhat.com>

So we have two offset manager implementations for dealing with VMA offsets:
GEM had one using the hash table, TTM had one using an rbtree.

I'd rather we just had one to rule them all. Since TTM uses the rbtree
variant to allow sub mappings, we need to keep that one to preserve the ABI.
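
For illustration, the rbtree lookup returns the node whose allocated range
contains the requested page range, which is what makes sub mappings work.
A sketch with made-up offsets, assuming a node set up at page 0x100 with
16 pages:

	/* hypothetical offsets, for illustration only */
	node = drm_vma_offset_lookup(man, 0x100, 16); /* exact range: found */
	node = drm_vma_offset_lookup(man, 0x104, 4);  /* sub mapping: found */
	node = drm_vma_offset_lookup(man, 0x10c, 8);  /* runs past the end: NULL */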

This also adds a bunch of inline helpers so that GEM and TTM don't have to
poke around inside the vma_offset objects. TTM needs a reset helper to clear
a vma_offset when it copies one object to another for buffer moves. The
helpers also let drivers avoid the map_list.hash.key << PAGE_SHIFT nonsense.
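
With the helpers, the driver-side pattern (this is the same conversion the
patch applies in exynos, gma500, i915 and udl below) becomes:

	if (!drm_vma_node_is_allocated(&obj->map_list.vma_offset)) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}
	/* was: *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; */
	*offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);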

TODO: check interactions with ttm vm_lock.

I've boot-tested this on an Intel Ironlake and an AMD Redwood.

Signed-off-by: Dave Airlie <airlied at redhat.com>
---
 drivers/gpu/drm/Makefile                |   2 +-
 drivers/gpu/drm/drm_gem.c               |  54 +++---------
 drivers/gpu/drm/drm_gem_cma_helper.c    |   4 +-
 drivers/gpu/drm/drm_vma_offset_man.c    | 149 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/exynos/exynos_drm_gem.c |   6 +-
 drivers/gpu/drm/gma500/gem.c            |   6 +-
 drivers/gpu/drm/i915/i915_gem.c         |   8 +-
 drivers/gpu/drm/ttm/ttm_bo.c            |  73 +++-------------
 drivers/gpu/drm/ttm/ttm_bo_util.c       |   2 +-
 drivers/gpu/drm/ttm/ttm_bo_vm.c         |  85 ++++++++----------
 drivers/gpu/drm/udl/udl_gem.c           |   6 +-
 include/drm/drmP.h                      |   6 +-
 include/drm/drm_vma_offset_man.h        |  58 +++++++++++++
 include/drm/ttm/ttm_bo_api.h            |  10 +--
 include/drm/ttm/ttm_bo_driver.h         |   3 +-
 15 files changed, 287 insertions(+), 185 deletions(-)
 create mode 100644 drivers/gpu/drm/drm_vma_offset_man.c
 create mode 100644 include/drm/drm_vma_offset_man.h

diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6f58c81..20a1eeb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o
+		drm_trace_points.o drm_global.o drm_prime.o drm_vma_offset_man.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 24efae4..551352f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -103,18 +103,11 @@ drm_gem_init(struct drm_device *dev)
 
 	dev->mm_private = mm;
 
-	if (drm_ht_create(&mm->offset_hash, 12)) {
+	if (drm_vma_offset_man_init(&mm->vma_manager, DRM_FILE_PAGE_OFFSET_START,
+				    DRM_FILE_PAGE_OFFSET_SIZE)) {
 		kfree(mm);
 		return -ENOMEM;
 	}
-
-	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-			DRM_FILE_PAGE_OFFSET_SIZE)) {
-		drm_ht_remove(&mm->offset_hash);
-		kfree(mm);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -123,8 +116,7 @@ drm_gem_destroy(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm = dev->mm_private;
 
-	drm_mm_takedown(&mm->offset_manager);
-	drm_ht_remove(&mm->offset_hash);
+	drm_vma_offset_man_fini(&mm->vma_manager);
 	kfree(mm);
 	dev->mm_private = NULL;
 }
@@ -314,8 +306,7 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_map_list *list = &obj->map_list;
 
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-	drm_mm_put_block(list->file_offset_node);
+	drm_vma_offset_destroy(&mm->vma_manager, &list->vma_offset);
 	kfree(list->map);
 	list->map = NULL;
 }
@@ -352,34 +343,13 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 	map->size = obj->size;
 	map->handle = obj;
 
-	/* Get a DRM GEM mmap offset allocated... */
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-			obj->size / PAGE_SIZE, 0, false);
-
-	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-		ret = -ENOSPC;
-		goto out_free_list;
-	}
-
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-			obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
+	ret = drm_vma_offset_setup(&mm->vma_manager, &list->vma_offset,
+				   obj->size / PAGE_SIZE);
+	if (ret)
 		goto out_free_list;
-	}
-
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		DRM_ERROR("failed to add to map hash\n");
-		goto out_free_mm;
-	}
 
 	return 0;
 
-out_free_mm:
-	drm_mm_put_block(list->file_offset_node);
 out_free_list:
 	kfree(list->map);
 	list->map = NULL;
@@ -674,7 +644,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_local_map *map = NULL;
 	struct drm_gem_object *obj;
-	struct drm_hash_item *hash;
+	struct drm_vma_offset_node *offset_node;
+	struct drm_map_list *list;
 	int ret = 0;
 
 	if (drm_device_is_unplugged(dev))
@@ -682,12 +653,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+	offset_node = drm_vma_offset_lookup(&mm->vma_manager,
+					    vma->vm_pgoff, 1);
+	if (!offset_node) {
 		mutex_unlock(&dev->struct_mutex);
 		return drm_mmap(filp, vma);
 	}
 
-	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	list = container_of(offset_node, struct drm_map_list, vma_offset);
+	map = list->map;
 	if (!map ||
 	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
 		ret =  -EPERM;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1aa8fee..43b485f 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -29,7 +29,7 @@
 
 static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
 {
-	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+	return (unsigned int)drm_vma_node_offset_addr(&obj->map_list.vma_offset);
 }
 
 static void drm_gem_cma_buf_destroy(struct drm_device *drm,
@@ -140,7 +140,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_gem_cma_object *cma_obj;
 
-	if (gem_obj->map_list.map)
+	if (drm_vma_node_is_allocated(&gem_obj->map_list.vma_offset))
 		drm_gem_free_mmap_offset(gem_obj);
 
 	drm_gem_object_release(gem_obj);
diff --git a/drivers/gpu/drm/drm_vma_offset_man.c b/drivers/gpu/drm/drm_vma_offset_man.c
new file mode 100644
index 0000000..cf2e291
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_offset_man.c
@@ -0,0 +1,149 @@
+#include <drm/drmP.h>
+
+static struct drm_vma_offset_node *drm_vma_offset_lookup_rb_locked(struct drm_vma_offset_manager *man,
+								   unsigned long page_start,
+								   unsigned long num_pages)
+{
+	struct rb_node *cur = man->addr_space_rb.rb_node;
+	unsigned long cur_offset;
+	struct drm_vma_offset_node *node;
+	struct drm_vma_offset_node *best_node = NULL;
+
+	while (likely(cur != NULL)) {
+		node = rb_entry(cur, struct drm_vma_offset_node, vm_rb);
+		cur_offset = node->vm_node->start;
+		if (page_start >= cur_offset) {
+			cur = cur->rb_right;
+			best_node = node;
+			if (page_start == cur_offset)
+				break;
+		} else
+			cur = cur->rb_left;
+	}
+	if (unlikely(best_node == NULL))
+		return NULL;
+
+	if (unlikely((best_node->vm_node->start + best_node->num_pages) <
+		     (page_start + num_pages)))
+		return NULL;
+	return best_node;
+}
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *man,
+						  unsigned long page_start,
+						  unsigned long num_pages)
+{
+	struct drm_vma_offset_node *node;
+	read_lock(&man->vm_lock);
+	node = drm_vma_offset_lookup_rb_locked(man, page_start, num_pages);
+	read_unlock(&man->vm_lock);
+	return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+static void drm_vma_offset_insert_rb(struct drm_vma_offset_manager *man,
+				     struct drm_vma_offset_node *node)
+{
+	struct rb_node **cur = &man->addr_space_rb.rb_node;
+	struct rb_node *parent = NULL;
+	struct drm_vma_offset_node *cur_node;
+	unsigned long offset = node->vm_node->start;
+	unsigned long cur_offset;
+
+	while (*cur) {
+		parent = *cur;
+		cur_node = rb_entry(parent, struct drm_vma_offset_node, vm_rb);
+		cur_offset = cur_node->vm_node->start;
+		if (offset < cur_offset)
+			cur = &parent->rb_left;
+		else if (offset > cur_offset)
+			cur = &parent->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&node->vm_rb, parent, cur);
+	rb_insert_color(&node->vm_rb, &man->addr_space_rb);
+}
+
+void drm_vma_offset_destroy_locked(struct drm_vma_offset_manager *man,
+				   struct drm_vma_offset_node *node)
+{
+	if (likely(node->vm_node != NULL)) {
+		rb_erase(&node->vm_rb, &man->addr_space_rb);
+		drm_mm_put_block(node->vm_node);
+		node->vm_node = NULL;
+	}
+}
+
+void drm_vma_offset_destroy(struct drm_vma_offset_manager *man,
+			    struct drm_vma_offset_node *node)
+{
+	write_lock(&man->vm_lock);
+	drm_vma_offset_destroy_locked(man, node);
+	write_unlock(&man->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_destroy);
+
+int drm_vma_offset_setup(struct drm_vma_offset_manager *man,
+			 struct drm_vma_offset_node *node,
+			 unsigned long num_pages)
+{
+	int ret;
+
+retry_pre_get:
+	ret = drm_mm_pre_get(&man->addr_space_mm);
+	if (unlikely(ret != 0))
+		return ret;
+
+	write_lock(&man->vm_lock);
+	node->vm_node = drm_mm_search_free(&man->addr_space_mm,
+					   num_pages, 0, 0);
+
+	if (unlikely(node->vm_node == NULL)) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	node->vm_node = drm_mm_get_block_atomic(node->vm_node,
+						num_pages, 0);
+
+	if (unlikely(node->vm_node == NULL)) {
+		write_unlock(&man->vm_lock);
+		goto retry_pre_get;
+	}
+
+	node->num_pages = num_pages;
+	drm_vma_offset_insert_rb(man, node);
+	write_unlock(&man->vm_lock);
+
+	return 0;
+out_unlock:
+	write_unlock(&man->vm_lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_setup);
+
+int drm_vma_offset_man_init(struct drm_vma_offset_manager *man,
+			    uint64_t file_page_offset, uint64_t size)
+{
+	int ret;
+	rwlock_init(&man->vm_lock);
+	man->addr_space_rb = RB_ROOT;
+	ret = drm_mm_init(&man->addr_space_mm, file_page_offset, size);
+	if (unlikely(ret != 0))
+		goto out_no_addr_mm;
+	return 0;
+out_no_addr_mm:
+	return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_man_init);
+
+void drm_vma_offset_man_fini(struct drm_vma_offset_manager *man)
+{
+	BUG_ON(!drm_mm_clean(&man->addr_space_mm));
+	write_lock(&man->vm_lock);
+	drm_mm_takedown(&man->addr_space_mm);
+	write_unlock(&man->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_man_fini);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d48183e..5389bfb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -168,7 +168,7 @@ out:
 	exynos_drm_fini_buf(obj->dev, buf);
 	exynos_gem_obj->buffer = NULL;
 
-	if (obj->map_list.map)
+	if (drm_vma_node_is_allocated(&obj->map_list.vma_offset))
 		drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
@@ -704,13 +704,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 		goto unlock;
 	}
 
-	if (!obj->map_list.map) {
+	if (!drm_vma_node_is_allocated(&obj->map_list.vma_offset)) {
 		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
 
-	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+	*offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);
 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc..1c49fbc 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -38,7 +38,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
 
 	/* Remove the list map if one is present */
-	if (obj->map_list.map)
+	if (drm_vma_node_is_allocated(&obj->map_list.vma_offset))
 		drm_gem_free_mmap_offset(obj);
 	drm_gem_object_release(obj);
 
@@ -81,13 +81,13 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 	/* What validation is needed here ? */
 
 	/* Make it mmapable */
-	if (!obj->map_list.map) {
+	if (!drm_vma_node_is_allocated(&obj->map_list.vma_offset)) {
 		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
 	/* GEM should really work out the hash offsets for us */
-	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+	*offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);
 out:
 	drm_gem_object_unreference(obj);
 unlock:
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c1f6919..75138cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1426,7 +1426,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 
 	if (obj->base.dev->dev_mapping)
 		unmap_mapping_range(obj->base.dev->dev_mapping,
-				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+				    (loff_t)drm_vma_node_offset_addr(&obj->base.map_list.vma_offset),
 				    obj->base.size, 1);
 
 	obj->fault_mappable = false;
@@ -1514,7 +1514,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (obj->base.map_list.map)
+	if (drm_vma_node_is_allocated(&obj->base.map_list.vma_offset))
 		return 0;
 
 	ret = drm_gem_create_mmap_offset(&obj->base);
@@ -1539,7 +1539,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	if (!obj->base.map_list.map)
+	if (!drm_vma_node_is_allocated(&obj->base.map_list.vma_offset))
 		return;
 
 	drm_gem_free_mmap_offset(&obj->base);
@@ -1580,7 +1580,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	if (ret)
 		goto out;
 
-	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+	*offset = drm_vma_node_offset_addr(&obj->base.map_list.vma_offset);
 
 out:
 	drm_gem_object_unreference(&obj->base);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a915133..7f3596c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -712,11 +712,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	write_lock(&bdev->vm_lock);
-	if (likely(bo->vm_node != NULL)) {
-		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-		drm_mm_put_block(bo->vm_node);
-		bo->vm_node = NULL;
-	}
+	drm_vma_offset_destroy(&bdev->offset_manager, &bo->vma_offset);
 	write_unlock(&bdev->vm_lock);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
@@ -1524,10 +1520,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		TTM_DEBUG("Swap list was clean\n");
 	spin_unlock(&glob->lru_lock);
 
-	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-	write_lock(&bdev->vm_lock);
-	drm_mm_takedown(&bdev->addr_space_mm);
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_man_fini(&bdev->offset_manager);
 
 	return ret;
 }
@@ -1554,11 +1547,11 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
-	bdev->addr_space_rb = RB_ROOT;
-	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+	ret = drm_vma_offset_man_init(&bdev->offset_manager, file_page_offset,
+				      0x10000000);
 	if (unlikely(ret != 0))
 		goto out_no_addr_mm;
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
@@ -1624,31 +1618,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-	struct rb_node *parent = NULL;
-	struct ttm_buffer_object *cur_bo;
-	unsigned long offset = bo->vm_node->start;
-	unsigned long cur_offset;
-
-	while (*cur) {
-		parent = *cur;
-		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-		cur_offset = cur_bo->vm_node->start;
-		if (offset < cur_offset)
-			cur = &parent->rb_left;
-		else if (offset > cur_offset)
-			cur = &parent->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&bo->vm_rb, parent, cur);
-	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
 /**
  * ttm_bo_setup_vm:
  *
@@ -1665,35 +1634,12 @@ static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 	struct ttm_bo_device *bdev = bo->bdev;
 	int ret;
 
-retry_pre_get:
-	ret = drm_mm_pre_get(&bdev->addr_space_mm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	write_lock(&bdev->vm_lock);
-	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-					 bo->mem.num_pages, 0, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-					      bo->mem.num_pages, 0);
+	ret = drm_vma_offset_setup(&bdev->offset_manager,
+				   &bo->vma_offset,
+				   bo->mem.num_pages);
 
-	if (unlikely(bo->vm_node == NULL)) {
-		write_unlock(&bdev->vm_lock);
-		goto retry_pre_get;
-	}
-
-	ttm_bo_vm_insert_rb(bo);
-	write_unlock(&bdev->vm_lock);
-	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-	return 0;
-out_unlock:
-	write_unlock(&bdev->vm_lock);
+	if (ret == 0)
+		bo->addr_space_offset = drm_vma_node_offset_addr(&bo->vma_offset);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 9e9c5d2..52d9750 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -438,7 +438,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	fbo->vm_node = NULL;
+	drm_vma_node_reset(&fbo->vma_offset);
 	atomic_set(&fbo->cpu_writers, 0);
 
 	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 74705f3..3e52e25 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -40,37 +40,6 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-						     unsigned long page_start,
-						     unsigned long num_pages)
-{
-	struct rb_node *cur = bdev->addr_space_rb.rb_node;
-	unsigned long cur_offset;
-	struct ttm_buffer_object *bo;
-	struct ttm_buffer_object *best_bo = NULL;
-
-	while (likely(cur != NULL)) {
-		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-		cur_offset = bo->vm_node->start;
-		if (page_start >= cur_offset) {
-			cur = cur->rb_right;
-			best_bo = bo;
-			if (page_start == cur_offset)
-				break;
-		} else
-			cur = cur->rb_left;
-	}
-
-	if (unlikely(best_bo == NULL))
-		return NULL;
-
-	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-		     (page_start + num_pages)))
-		return NULL;
-
-	return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +115,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-	    bo->vm_node->start - vma->vm_pgoff;
+		drm_vma_node_start(&bo->vma_offset) - vma->vm_pgoff;
 	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
-	    bo->vm_node->start - vma->vm_pgoff;
+		drm_vma_node_start(&bo->vma_offset) - vma->vm_pgoff;
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
@@ -249,24 +218,43 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.close = ttm_bo_vm_close
 };
 
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-		struct ttm_bo_device *bdev)
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+						  unsigned long dev_offset,
+						  unsigned long num_pages)
 {
-	struct ttm_bo_driver *driver;
+	struct drm_vma_offset_node *node;
 	struct ttm_buffer_object *bo;
-	int ret;
 
 	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-		bo = NULL;
-	read_unlock(&bdev->vm_lock);
+	node = drm_vma_offset_lookup(&bdev->offset_manager, dev_offset,
+				     num_pages);
+	if (unlikely(node == NULL)) {
+		pr_err("Could not find buffer object to map\n");
+		read_unlock(&bdev->vm_lock);
+		return NULL;
+	}
+	bo = container_of(node, struct ttm_buffer_object, vma_offset);
 
-	if (unlikely(bo == NULL)) {
+	if (!kref_get_unless_zero(&bo->kref)) {
+		bo = NULL;
 		pr_err("Could not find buffer object to map\n");
-		return -EINVAL;
 	}
+	read_unlock(&bdev->vm_lock);
+	return bo;
+}
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		struct ttm_bo_device *bdev)
+{
+	struct ttm_bo_driver *driver;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	bo = ttm_bo_vm_lookup(bdev,
+			      vma->vm_pgoff,
+			      (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+	if (unlikely(bo == NULL))
+		return -EFAULT;
 
 	driver = bo->bdev->driver;
 	if (unlikely(!driver->verify_access)) {
@@ -324,12 +313,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	bool no_wait = false;
 	bool dummy;
 
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-	if (likely(bo != NULL))
-		ttm_bo_reference(bo);
-	read_unlock(&bdev->vm_lock);
-
+	bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
 	if (unlikely(bo == NULL))
 		return -EFAULT;
 
@@ -343,7 +327,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	if (unlikely(ret != 0))
 		goto out_unref;
 
-	kmap_offset = dev_offset - bo->vm_node->start;
+	kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_offset);
 	if (unlikely(kmap_offset >= bo->num_pages)) {
 		ret = -EFBIG;
 		goto out_unref;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index afd212c..2cc79d2 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -223,7 +223,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->pages)
 		udl_gem_put_pages(obj);
 
-	if (gem_obj->map_list.map)
+	if (drm_vma_node_is_allocated(&gem_obj->map_list.vma_offset))
 		drm_gem_free_mmap_offset(gem_obj);
 }
 
@@ -247,13 +247,13 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
 	if (ret)
 		goto out;
-	if (!gobj->base.map_list.map) {
+	if (!drm_vma_node_is_allocated(&gobj->base.map_list.vma_offset)) {
 		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
 
-	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+	*offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);
 
 out:
 	drm_gem_object_unreference(&gobj->base);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fad21c9..963e00e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -88,6 +88,7 @@ struct drm_device;
 #include <drm/drm_os_linux.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mm.h>
+#include <drm/drm_vma_offset_man.h>
 
 #define DRM_UT_CORE 		0x01
 #define DRM_UT_DRIVER		0x02
@@ -578,7 +580,7 @@ struct drm_map_list {
 	struct drm_local_map *map;	/**< mapping */
 	uint64_t user_token;
 	struct drm_master *master;
-	struct drm_mm_node *file_offset_node;	/**< fake offset */
+	struct drm_vma_offset_node vma_offset;
 };
 
 /**
@@ -613,8 +615,7 @@ struct drm_ati_pcigart_info {
  * GEM specific mm private for tracking GEM objects
  */
 struct drm_gem_mm {
-	struct drm_mm offset_manager;	/**< Offset mgmt for buffer objects */
-	struct drm_open_hash offset_hash; /**< User token hash table for maps */
+	struct drm_vma_offset_manager vma_manager;
 };
 
 /**
diff --git a/include/drm/drm_vma_offset_man.h b/include/drm/drm_vma_offset_man.h
new file mode 100644
index 0000000..4211c60
--- /dev/null
+++ b/include/drm/drm_vma_offset_man.h
@@ -0,0 +1,58 @@
+#ifndef DRM_VMA_OFFSET_MAN_H
+#define DRM_VMA_OFFSET_MAN_H
+
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include <drm/drm_mm.h>
+
+struct drm_mm_node;
+
+struct drm_vma_offset_node {
+	struct drm_mm_node *vm_node;
+	struct rb_node vm_rb;
+	uint64_t num_pages;
+};
+
+struct drm_vma_offset_manager {
+	rwlock_t vm_lock;
+	struct rb_root addr_space_rb;
+	struct drm_mm addr_space_mm;
+};
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *man,
+						  unsigned long page_start,
+						  unsigned long num_pages);
+
+int drm_vma_offset_setup(struct drm_vma_offset_manager *man,
+			 struct drm_vma_offset_node *node,
+			 unsigned long num_pages);
+
+void drm_vma_offset_destroy_locked(struct drm_vma_offset_manager *man,
+				   struct drm_vma_offset_node *node);
+void drm_vma_offset_destroy(struct drm_vma_offset_manager *man,
+			    struct drm_vma_offset_node *node);
+int drm_vma_offset_man_init(struct drm_vma_offset_manager *man,
+			    uint64_t file_page_offset, uint64_t size);
+void drm_vma_offset_man_fini(struct drm_vma_offset_manager *man);
+
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+	node->vm_node = NULL;
+}
+
+static inline uint64_t drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+	return node->vm_node->start;
+}
+
+static inline bool drm_vma_node_is_allocated(struct drm_vma_offset_node *node)
+{
+	return node->vm_node != NULL;
+}
+
+static inline uint64_t drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+	return ((uint64_t) node->vm_node->start) << PAGE_SHIFT;
+}
+
+#endif /* DRM_VMA_OFFSET_MAN_H */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 3cb5d84..fd2020f 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,12 +32,12 @@
 #define _TTM_BO_API_H_
 
 #include <drm/drm_hashtab.h>
+#include <drm/drm_vma_offset_man.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
-#include <linux/rbtree.h>
 #include <linux/bitmap.h>
 
 struct ttm_bo_device;
@@ -254,13 +254,7 @@ struct ttm_buffer_object {
 	void *sync_obj;
 	unsigned long priv_flags;
 
-	/**
-	 * Members protected by the bdev::vm_lock
-	 */
-
-	struct rb_node vm_rb;
-	struct drm_mm_node *vm_node;
-
+	struct drm_vma_offset_node vma_offset;
 
 	/**
 	 * Special members that are protected by the reserve lock
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a4..77afa32 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -541,8 +541,7 @@ struct ttm_bo_device {
 	/*
 	 * Protected by the vm lock.
 	 */
-	struct rb_root addr_space_rb;
-	struct drm_mm addr_space_mm;
+	struct drm_vma_offset_manager offset_manager;
 
 	/*
 	 * Protected by the global:lru lock.
-- 
1.8.0.2


