[PATCH 08/13] drm/radeon: add biggest hole tracking and wakequeue to the sa

Christian König deathsimple at vodafone.de
Thu Apr 19 15:39:15 PDT 2012


With this in place, clients automatically block
until their memory request can be handled.

Signed-off-by: Christian König <deathsimple at vodafone.de>
---
 drivers/gpu/drm/radeon/radeon.h      |    5 +-
 drivers/gpu/drm/radeon/radeon_ring.c |   18 ++--
 drivers/gpu/drm/radeon/radeon_sa.c   |  192 +++++++++++++++++++++++++---------
 3 files changed, 153 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1aefbd9..415a496 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -381,17 +381,16 @@ struct radeon_bo_list {
  * alignment).
  */
 struct radeon_sa_manager {
-	spinlock_t		lock;
+	wait_queue_head_t	queue;
 	struct radeon_bo	*bo;
 	struct list_head	sa_bo;
 	unsigned		size;
+	struct list_head	*biggest_hole;
 	uint64_t		gpu_addr;
 	void			*cpu_ptr;
 	uint32_t		domain;
 };
 
-struct radeon_sa_bo;
-
 /* sub-allocation buffer */
 struct radeon_sa_bo {
 	struct list_head		list;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1d9bce9..5942769 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -205,10 +205,16 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
-	struct radeon_sa_manager tmp;
 	int i, r;
 
-	r = radeon_sa_bo_manager_init(rdev, &tmp,
+	radeon_mutex_lock(&rdev->ib_pool.mutex);
+	r = rdev->ib_pool.ready;
+	rdev->ib_pool.ready = true;
+	radeon_mutex_unlock(&rdev->ib_pool.mutex);
+	if (r)
+		return 0;
+
+	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
 				      RADEON_IB_POOL_SIZE*64*1024,
 				      RADEON_GEM_DOMAIN_GTT);
 	if (r) {
@@ -216,14 +222,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	}
 
 	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (rdev->ib_pool.ready) {
-		radeon_mutex_unlock(&rdev->ib_pool.mutex);
-		radeon_sa_bo_manager_fini(rdev, &tmp);
-		return 0;
-	}
-
-	rdev->ib_pool.sa_manager = tmp;
-	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
 	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
 		rdev->ib_pool.ibs[i].fence = NULL;
 		rdev->ib_pool.ibs[i].idx = i;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 013a787..72ebb77 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -26,6 +26,7 @@
 /*
  * Authors:
  *    Jerome Glisse <glisse at freedesktop.org>
+ *    Christian König <christian.koenig at amd.com>
  */
 #include "drmP.h"
 #include "drm.h"
@@ -37,9 +38,10 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 {
 	int r;
 
-	spin_lock_init(&sa_manager->lock);
+	init_waitqueue_head(&sa_manager->queue);
 	sa_manager->bo = NULL;
 	sa_manager->size = size;
+	sa_manager->biggest_hole = &sa_manager->sa_bo;
 	sa_manager->domain = domain;
 	INIT_LIST_HEAD(&sa_manager->sa_bo);
 
@@ -58,6 +60,7 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
 {
 	struct radeon_sa_bo *sa_bo, *tmp;
 
+	wake_up_all(&sa_manager->queue);
 	if (!list_empty(&sa_manager->sa_bo)) {
 		dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
 	}
@@ -129,81 +132,172 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
  *
  * Alignment can't be bigger than page size
  */
+
+static inline unsigned radeon_sa_bo_hole_start(struct radeon_sa_manager *m,
+					       struct list_head *entry)
+{
+	struct radeon_sa_bo *sa_bo;
+
+	if (entry == &m->sa_bo)
+		return 0;
+
+	sa_bo = list_entry(entry, struct radeon_sa_bo, list);
+	return sa_bo->offset + sa_bo->size;
+}
+
+static inline unsigned radeon_sa_bo_hole_end(struct radeon_sa_manager *m,
+					     struct list_head *entry)
+{
+	if (entry->next == &m->sa_bo)
+		return m->size;
+
+	return list_entry(entry->next, struct radeon_sa_bo, list)->offset;
+}
+
+static inline unsigned radeon_sa_bo_hole_size(struct radeon_sa_manager *m,
+					      struct list_head *entry,
+					      unsigned align)
+{
+	unsigned start, end, wasted;
+	start = radeon_sa_bo_hole_start(m, entry);
+	wasted = start % align;
+	if (wasted)
+		start += align - wasted;
+
+	end = radeon_sa_bo_hole_end(m, entry);
+	return start < end ? end - start : 0;
+}
+
 int radeon_sa_bo_new(struct radeon_device *rdev,
 		     struct radeon_sa_manager *sa_manager,
 		     struct radeon_sa_bo *sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct radeon_sa_bo *tmp;
-	struct list_head *head;
-	unsigned offset = 0, wasted = 0;
-	unsigned long flags;
+	struct list_head *head, *curr, *hole;
+	unsigned start, currsize, wasted, holesize = 0;
+	int r;
 
 	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
 	BUG_ON(size > sa_manager->size);
-	spin_lock_irqsave(&sa_manager->lock, flags);
 
-	/* no one ? */
-	if (list_empty(&sa_manager->sa_bo)) {
-		head = &sa_manager->sa_bo;
-		goto out;
+	spin_lock_irq(&sa_manager->queue.lock);
+
+	r = wait_event_interruptible_locked_irq(sa_manager->queue,
+		radeon_sa_bo_hole_size(sa_manager, sa_manager->biggest_hole, align) >= size
+	);
+	if (r) {
+		spin_unlock_irq(&sa_manager->queue.lock);
+		return r;
 	}
 
-	/* look for a hole big enough */
-	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
-		/* room before this object ? */
-		if (offset < tmp->offset && (tmp->offset - offset) >= size) {
-			head = tmp->list.prev;
-			goto out;
-		}
-		offset = tmp->offset + tmp->size;
-		wasted = offset % align;
+	curr = head = hole = &sa_manager->sa_bo;
+	do {
+		start = radeon_sa_bo_hole_start(sa_manager, curr);
+		currsize = radeon_sa_bo_hole_end(sa_manager, curr) - start;
+
+		wasted = start % align;
 		if (wasted) {
-			offset += align - wasted;
+			wasted = align - wasted;
+			start += wasted;
+		}
+
+		/* room after current big enough ? */
+		if (currsize >= (size + wasted)) {
+			sa_bo->manager = sa_manager;
+			sa_bo->offset = start;
+			sa_bo->size = size;
+			list_add(&sa_bo->list, curr);
+
+			/* did we borrow from the biggest hole ? */
+			if (curr == sa_manager->biggest_hole) {
+
+				/* consider the space left after the newly added sa_bo */
+				curr = curr->next;
+				currsize -= size;
+				if (holesize < currsize) {
+					hole = curr;
+					holesize = currsize;
+				}
+				curr = curr->next;
+
+				while (curr != head) {
+					currsize = radeon_sa_bo_hole_end(sa_manager, curr);
+					currsize -= radeon_sa_bo_hole_start(sa_manager, curr);
+					if (holesize < currsize) {
+						hole = curr;
+						holesize = currsize;
+					}
+					curr = curr->next;
+				}
+				sa_manager->biggest_hole = hole;
+				wake_up_locked(&sa_manager->queue);
+			}
+
+			spin_unlock_irq(&sa_manager->queue.lock);
+			return 0;
+		}
+
+		if (holesize < currsize) {
+			hole = curr;
+			holesize = currsize;
 		}
-	}
-	/* room at the end ? */
-	head = sa_manager->sa_bo.prev;
-	tmp = list_entry(head, struct radeon_sa_bo, list);
-	offset = tmp->offset + tmp->size;
-	wasted = offset % align;
-	if (wasted) {
-		offset += wasted = align - wasted;
-	}
-	if ((sa_manager->size - offset) < size) {
-		/* failed to find somethings big enough */
-		spin_unlock_irqrestore(&sa_manager->lock, flags);
-		return -ENOMEM;
-	}
 
-out:
-	sa_bo->manager = sa_manager;
-	sa_bo->offset = offset;
-	sa_bo->size = size;
-	list_add(&sa_bo->list, head);
-	spin_unlock_irqrestore(&sa_manager->lock, flags);
-	return 0;
+		curr = curr->next;
+	} while (curr != head);
+
+	/* failed to find something big enough */
+	spin_unlock_irq(&sa_manager->queue.lock);
+	return -ENOMEM;
 }
 
 void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
 {
+	struct radeon_sa_manager *sa_manager = sa_bo->manager;
 	unsigned long flags;
-	spin_lock_irqsave(&sa_bo->manager->lock, flags);
+
+	spin_lock_irqsave(&sa_manager->queue.lock, flags);
+	if (&sa_bo->list == sa_manager->biggest_hole ||
+	    sa_bo->list.prev == sa_manager->biggest_hole) {
+
+		sa_manager->biggest_hole = sa_bo->list.prev;
+		wake_up_locked(&sa_manager->queue);
+	}
 	list_del_init(&sa_bo->list);
-	spin_unlock_irqrestore(&sa_bo->manager->lock, flags);
+	spin_unlock_irqrestore(&sa_manager->queue.lock, flags);
 }
 
 #if defined(CONFIG_DEBUG_FS)
 void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
-	struct radeon_sa_bo *i;
+	struct list_head *head, *curr;
+	struct radeon_sa_bo *sa_bo;
+	unsigned start, end;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sa_manager->lock, flags);
-	list_for_each_entry(i, &sa_manager->sa_bo, list) {
-		seq_printf(m, "offset %08d: size %4d\n", i->offset, i->size);
-	}
-	spin_unlock_irqrestore(&sa_manager->lock, flags);
+	spin_lock_irqsave(&sa_manager->queue.lock, flags);
+	curr = head = &sa_manager->sa_bo;
+	do {
+		if (curr != &sa_manager->sa_bo) {
+			sa_bo = list_entry(curr, struct radeon_sa_bo, list);
+			seq_printf(m, "reservation  %p %08d: size %7d\n",
+				   curr, sa_bo->offset, sa_bo->size);
+		}
+
+		start = radeon_sa_bo_hole_start(sa_manager, curr);
+		end = radeon_sa_bo_hole_end(sa_manager, curr);
+		if (start < end) {
+			seq_printf(m, "hole         %p %08d: size %7d\n",
+				   curr, start, end - start);
+		}
+		curr = curr->next;
+	} while (curr != head);
+
+	start = radeon_sa_bo_hole_start(sa_manager, sa_manager->biggest_hole);
+	end = radeon_sa_bo_hole_end(sa_manager, sa_manager->biggest_hole);
+	seq_printf(m, "\nbiggest hole %p %08d: size %7d\n",
+		   sa_manager->biggest_hole, start, end - start);
+
+	spin_unlock_irqrestore(&sa_manager->queue.lock, flags);
 }
 #endif
-- 
1.7.5.4



More information about the dri-devel mailing list