[PATCH 2/4] drm/ttm: Add optional support for two-ended allocation

Lauri Kasanen cand at gmx.com
Mon Mar 31 05:28:18 PDT 2014


Allocating small bos from one end and large ones from the other helps
reduce fragmentation.

This depends on "drm: Optionally create mm blocks from top-to-bottom" by
Chris Wilson.

Signed-off-by: Lauri Kasanen <cand at gmx.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c         |  4 +++-
 drivers/gpu/drm/ttm/ttm_bo_manager.c | 16 +++++++++++++---
 include/drm/ttm/ttm_bo_driver.h      |  7 ++++++-
 3 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9df79ac..caf7cd3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1453,7 +1453,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
 		       uint64_t file_page_offset,
-		       bool need_dma32)
+		       bool need_dma32,
+		       uint32_t alloc_threshold)
 {
 	int ret = -EINVAL;
 
@@ -1476,6 +1477,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
+	bdev->alloc_threshold = alloc_threshold;
 	bdev->val_seq = 0;
 	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index c58eba33..db9fcb4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
 	unsigned long lpfn;
 	int ret;
 
@@ -65,12 +66,21 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
+	/*
+	 * If the driver requested a threshold, use two-ended allocation.
+	 * Pinned buffers require bottom-up allocation.
+	 */
+	if (man->bdev->alloc_threshold &&
+		!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT) &&
+		man->bdev->alloc_threshold < (mem->num_pages * PAGE_SIZE))
+		aflags = DRM_MM_CREATE_TOP;
 
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  mem->page_alignment,
+	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+					  mem->page_alignment, 0,
 					  placement->fpfn, lpfn,
-					  DRM_MM_SEARCH_BEST);
+					  DRM_MM_SEARCH_BEST,
+					  aflags);
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5d8aabe..f5fe6df 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -565,6 +565,7 @@ struct ttm_bo_device {
 	struct delayed_work wq;
 
 	bool need_dma32;
+	uint32_t alloc_threshold;
 };
 
 /**
@@ -751,6 +752,8 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * @file_page_offset: Offset into the device address space that is available
  * for buffer data. This ensures compatibility with other users of the
  * address space.
+ * @alloc_threshold: If non-zero, buffers larger than this size in bytes
+ * are allocated from the top of the address space (two-ended allocation).
  *
  * Initializes a struct ttm_bo_device:
  * Returns:
@@ -760,7 +763,9 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
 			      struct ttm_bo_global *glob,
 			      struct ttm_bo_driver *driver,
 			      struct address_space *mapping,
-			      uint64_t file_page_offset, bool need_dma32);
+			      uint64_t file_page_offset,
+			      bool need_dma32,
+			      uint32_t alloc_threshold);
 
 /**
  * ttm_bo_unmap_virtual
-- 
1.8.3.1



More information about the dri-devel mailing list