[PATCH 4/6] drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers

Maarten Lankhorst <m.b.lankhorst at gmail.com>
Fri Nov 30 04:12:58 PST 2012


This requires re-use of the seqno. Instead of restarting with a new
seqno on every retry, we keep the current one, but we still drop all
the other reservations we hold. Only when the slowpath reservation
succeeds do we try to take our other reservations back.

Keeping the same seqno across retries should increase fairness
slightly, since a contended task keeps its original place in line
instead of moving to the back on every attempt.
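For illustration only, a minimal user-space model of the control flow
this patch creates, using plain pthreads and hypothetical names
(reserve_all, backoff_all, struct buf). It deliberately omits val_seq:
ordinary mutexes cannot express the seqno ordering that
ttm_bo_reserve_slowpath_nolru uses to let older contenders win, so this
sketch only models the back-off-and-retry structure:

#include <pthread.h>
#include <stdbool.h>

struct buf {
	pthread_mutex_t lock;
	bool held;		/* reserved by us in the current attempt */
};

/* Drop every reservation we currently hold. */
static void backoff_all(struct buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (bufs[i].held) {
			pthread_mutex_unlock(&bufs[i].lock);
			bufs[i].held = false;
		}
	}
}

/*
 * Reserve all n buffers.  On contention, drop everything we hold, block
 * on the contended buffer (the "slow path"), keep it marked as held,
 * and retry the remaining buffers.  Because nothing is held while
 * blocking, there is no hold-and-wait and therefore no deadlock.
 */
static int reserve_all(struct buf *bufs, int n)
{
	int i;

retry:
	for (i = 0; i < n; i++) {
		/* already slowpath reserved on the previous pass? */
		if (bufs[i].held)
			continue;

		if (pthread_mutex_trylock(&bufs[i].lock) == 0) {
			bufs[i].held = true;
			continue;
		}

		/* Contended: back off, then take this one blocking. */
		backoff_all(bufs, n);
		pthread_mutex_lock(&bufs[i].lock);
		bufs[i].held = true;
		goto retry;
	}
	return 0;
}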

Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d3236..c02b2b6 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -157,9 +161,15 @@ retry:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;
-- 
1.8.0


