[PATCH 09/13] drm/radeon: improve sa allocator v2

j.glisse at gmail.com
Tue May 1 21:00:54 PDT 2012


From: Jerome Glisse <jglisse at redhat.com>

The sa allocator is supposed to be a ring allocator, i.e. allocations
happen first at the end, and when there is no more room we start
again at the beginning. This patch makes the code match this design.
sa_manager keeps track of the start hole and the end hole. It first
tries to allocate in the end hole; if that fails it allocates in the
start hole; if that also fails it returns (the caller is expected to
retry).
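
To illustrate, here is a minimal standalone sketch of the allocation
decision (hypothetical names, not the actual driver code; it also
ignores the case where the newest allocation is followed by another
live one inside the pool):

    struct sa_state {
        unsigned size;       /* total pool size */
        unsigned hole_start; /* end offset of the newest allocation */
        unsigned oldest;     /* start offset of the oldest allocation */
    };

    /* Returns the allocation offset, or -1 when the caller must
     * wait for old allocations to be freed and then retry. */
    static int sa_alloc(struct sa_state *s, unsigned size, unsigned align)
    {
        unsigned wasted = (align - (s->hole_start % align)) % align;

        /* first try the hole at the end of the pool */
        if (s->hole_start + wasted + size <= s->size)
            return s->hole_start + wasted;

        /* then wrap around and try the hole at the beginning,
         * before the oldest still-live allocation */
        if (size <= s->oldest)
            return 0;

        return -1;
    }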

When freeing we need to make sure that we properly grow the end
hole and the start hole. We take advantage of the fact that the
sa_bo list is ordered by offset: when we free an sa_bo, the
previous sa_bo in the list is also the sa_bo located just before
the one we are freeing in memory, and conversely for the next one.
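
A matching sketch of the free path, assuming a circular
doubly-linked list kept in offset order (again hypothetical, not
the kernel list API):

    struct node {
        struct node *prev, *next;
    };

    struct pool {
        struct node head;  /* list sentinel */
        struct node *last; /* newest allocation, the ring "write ptr" */
    };

    static void sa_free(struct pool *p, struct node *n)
    {
        /* Because the list is offset-ordered, n->prev and n->next
         * are also the allocations physically adjacent to n, so
         * the holes around n merge as soon as n is unlinked. */
        n->prev->next = n->next;
        n->next->prev = n->prev;

        if (p->head.next == &p->head) {
            p->last = NULL; /* pool is now empty */
        } else if (p->last == n) {
            /* the new last is n's predecessor, wrapping to the
             * list tail when n was the first entry */
            p->last = (n->prev == &p->head) ? p->head.prev : n->prev;
        }
    }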

v2: Use a read-ptr metaphor to mimic ring behavior and simplify
    the code a bit.

Signed-off-by: Jerome Glisse <jglisse at redhat.com>
---
 drivers/gpu/drm/radeon/radeon.h           |    4 +-
 drivers/gpu/drm/radeon/radeon_cs.c        |    4 +-
 drivers/gpu/drm/radeon/radeon_gart.c      |    6 +-
 drivers/gpu/drm/radeon/radeon_object.h    |   11 +++
 drivers/gpu/drm/radeon/radeon_ring.c      |    6 +-
 drivers/gpu/drm/radeon/radeon_sa.c        |  128 +++++++++++++++++++----------
 drivers/gpu/drm/radeon/radeon_semaphore.c |    4 +-
 7 files changed, 107 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1aefbd9..dc4f4f3 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -385,6 +385,7 @@ struct radeon_sa_manager {
 	struct radeon_bo	*bo;
 	struct list_head	sa_bo;
 	unsigned		size;
+	struct radeon_sa_bo	*last;
 	uint64_t		gpu_addr;
 	void			*cpu_ptr;
 	uint32_t		domain;
@@ -396,7 +397,8 @@ struct radeon_sa_bo;
 struct radeon_sa_bo {
 	struct list_head		list;
 	struct radeon_sa_manager	*manager;
-	unsigned			offset;
+	unsigned			soffset;
+	unsigned			eoffset;
 	unsigned			size;
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5cac832..8de6b3a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -476,7 +476,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 		 * offset inside the pool bo
 		 */
-		parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
+		parser->const_ib->gpu_addr = parser->const_ib->sa_bo.soffset;
 		r = radeon_ib_schedule(rdev, parser->const_ib);
 		if (r)
 			goto out;
@@ -486,7 +486,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 	 * offset inside the pool bo
 	 */
-	parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+	parser->ib->gpu_addr = parser->ib->sa_bo.soffset;
 	parser->ib->is_const_ib = false;
 	r = radeon_ib_schedule(rdev, parser->ib);
 out:
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036..4a5d9d4 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -404,10 +404,8 @@ retry:
 		radeon_vm_unbind(rdev, vm_evict);
 		goto retry;
 	}
-	vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
-	vm->pt += (vm->sa_bo.offset >> 3);
-	vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
-	vm->pt_gpu_addr += vm->sa_bo.offset;
+	vm->pt = radeon_sa_bo_cpu_addr(&vm->sa_bo);
+	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(&vm->sa_bo);
 	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
 
 retry_id:
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d9b9333..99ab46a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -146,6 +146,17 @@ extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
 /*
  * sub allocation
  */
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
 extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 				     struct radeon_sa_manager *sa_manager,
 				     unsigned size, u32 domain);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1d9bce9..981ab95 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -127,10 +127,8 @@ retry:
 					     size, 256);
 			if (!r) {
 				*ib = &rdev->ib_pool.ibs[idx];
-				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
-				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
-				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
-				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
+				(*ib)->ptr = radeon_sa_bo_cpu_addr(&(*ib)->sa_bo);
+				(*ib)->gpu_addr = radeon_sa_bo_gpu_addr(&(*ib)->sa_bo);
 				(*ib)->fence = fence;
 				(*ib)->vm_id = 0;
 				(*ib)->is_const_ib = false;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 1e1bec1..63b0cd2 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -41,6 +41,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 	sa_manager->bo = NULL;
 	sa_manager->size = size;
 	sa_manager->domain = domain;
+	sa_manager->last = NULL;
 	INIT_LIST_HEAD(&sa_manager->sa_bo);
 
 	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
@@ -64,7 +65,9 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
 	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
 		list_del_init(&sa_bo->list);
 	}
-	radeon_bo_unref(&sa_manager->bo);
+	if (sa_manager->bo) {
+		radeon_bo_unref(&sa_manager->bo);
+	}
 	sa_manager->size = 0;
 }
 
@@ -114,18 +117,37 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
 	return r;
 }
 
+static void radeon_sa_bo_free_locked(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+{
+	struct radeon_sa_manager *sa_manager = sa_bo->manager;
+	struct list_head *prev;
+
+	prev = sa_bo->list.prev;
+	list_del_init(&sa_bo->list);
+	if (list_empty(&sa_manager->sa_bo)) {
+		/* this bo was alone in the list */
+		sa_manager->last = NULL;
+	} else if (sa_manager->last == sa_bo) {
+		if (prev == &sa_manager->sa_bo) {
+			/* sa_bo was the first entry of the list, the
+			 * tail of the list becomes the new last
+			 */
+			sa_manager->last = list_entry(sa_manager->sa_bo.prev, struct radeon_sa_bo, list);
+		} else {
+			/* prev became the new last */
+			sa_manager->last = list_entry(prev, struct radeon_sa_bo, list);
+		}
+	}
+}
+
 /*
  * Principe is simple, we keep a list of sub allocation in offset
  * order (first entry has offset == 0, last entry has the highest
  * offset).
  *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
+ * The last ptr serves as the equivalent of the read position in
+ * a CP ring.  last->prev is the previous last, while last->next
+ * is the oldest sa_bo allocated.
  *
  * Alignment can't be bigger than page size
  */
@@ -134,52 +156,65 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 		     struct radeon_sa_bo *sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct radeon_sa_bo *tmp;
-	struct list_head *head;
-	unsigned offset = 0, wasted = 0;
+	struct radeon_sa_bo *next, *oldest;
+	unsigned offset, wasted, hole_offset, hole_size;
+	bool try_begining = false, add_begining = false;
 
 	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
 	BUG_ON(size > sa_manager->size);
-	spin_lock(&sa_manager->lock);
 
-	/* no one ? */
-	if (list_empty(&sa_manager->sa_bo)) {
-		head = &sa_manager->sa_bo;
+	sa_bo->manager = sa_manager;
+	sa_bo->soffset = 0;
+	sa_bo->eoffset = 0;
+	sa_bo->size = 0;
+	INIT_LIST_HEAD(&sa_bo->list);
+
+	spin_lock(&sa_manager->lock);
+	if (sa_manager->last == NULL) {
+		offset = 0;
+		add_begining = true;
 		goto out;
 	}
 
-	/* look for a hole big enough */
-	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
-		/* room before this object ? */
-		if (offset < tmp->offset && (tmp->offset - offset) >= size) {
-			head = tmp->list.prev;
+	hole_offset = sa_manager->last->eoffset;
+	wasted = (align - (hole_offset % align)) % align;
+	if (sa_manager->last->list.next == &sa_manager->sa_bo) {
+		/* no sa bo after that one */
+		hole_size = sa_manager->size - hole_offset;
+		try_begining = true;
+		oldest = list_entry(sa_manager->sa_bo.next, struct radeon_sa_bo, list);
+	} else {
+		next = list_entry(sa_manager->last->list.next, struct radeon_sa_bo, list);
+		hole_size = next->soffset - hole_offset;
+	}
+	if ((size + wasted) <= hole_size) {
+		offset = hole_offset + wasted;
+		goto out;
+	} else if (try_begining) {
+		/* last was at the end of the list, so if we wrap around
+		 * we might find room at the beginning of the list
+		 */
+		offset = 0;
+		hole_size = oldest->soffset;
+		if (size <= hole_size) {
+			add_begining = true;
 			goto out;
 		}
-		offset = tmp->offset + tmp->size;
-		wasted = offset % align;
-		if (wasted) {
-			offset += align - wasted;
-		}
-	}
-	/* room at the end ? */
-	head = sa_manager->sa_bo.prev;
-	tmp = list_entry(head, struct radeon_sa_bo, list);
-	offset = tmp->offset + tmp->size;
-	wasted = offset % align;
-	if (wasted) {
-		offset += wasted = align - wasted;
-	}
-	if ((sa_manager->size - offset) < size) {
-		/* failed to find somethings big enough */
-		spin_unlock(&sa_manager->lock);
-		return -ENOMEM;
 	}
 
+	spin_unlock(&sa_manager->lock);
+	return -ENOMEM;
+
 out:
-	sa_bo->manager = sa_manager;
-	sa_bo->offset = offset;
+	if (add_begining) {
+		list_add(&sa_bo->list, &sa_manager->sa_bo);
+	} else {
+		list_add(&sa_bo->list, &sa_manager->last->list);
+	}
+	sa_manager->last = sa_bo;
+	sa_bo->soffset = offset;
+	sa_bo->eoffset = offset + size;
 	sa_bo->size = size;
-	list_add(&sa_bo->list, head);
 	spin_unlock(&sa_manager->lock);
 	return 0;
 }
@@ -189,7 +224,13 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
 	struct radeon_sa_manager *sa_manager = sa_bo->manager;
 
 	spin_lock(&sa_manager->lock);
-	list_del_init(&sa_bo->list);
+	if (list_empty(&sa_bo->list)) {
+		/* it has already been freed */
+		goto out;
+	}
+	radeon_sa_bo_free_locked(rdev, sa_bo);
+
+out:
 	spin_unlock(&sa_manager->lock);
 }
 
@@ -201,7 +242,8 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 
 	spin_lock(&sa_manager->lock);
 	list_for_each_entry(i, &sa_manager->sa_bo, list) {
-		seq_printf(m, "offset %08d: size %4d\n", i->offset, i->size);
+		seq_printf(m, "[0x%08x 0x%08x]/0x%08x size %d\n", i->soffset,
+			   i->eoffset, sa_manager->size, i->size);
 	}
 	spin_unlock(&sa_manager->lock);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3..c3763e4 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -55,9 +55,9 @@ static int radeon_semaphore_add_bo(struct radeon_device *rdev)
 		return r;
 	}
 	gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
-	gpu_addr += bo->ib->sa_bo.offset;
+	gpu_addr += bo->ib->sa_bo.soffset;
 	cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
-	cpu_ptr += (bo->ib->sa_bo.offset >> 2);
+	cpu_ptr += (bo->ib->sa_bo.soffset >> 2);
 	for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
 		bo->semaphores[i].gpu_addr = gpu_addr;
 		bo->semaphores[i].cpu_ptr = cpu_ptr;
-- 
1.7.7.6


