[PATCH 1/2] drm/ttm: Allow execbuf util reserves without ticket

Thomas Hellstrom <thellstrom at vmware.com>
Fri Nov 15 00:24:31 PST 2013


If no reservation ticket is given to the execbuf reservation utilities,
attempt the reservations with non-blocking (trylock) semantics instead.
This is intended for eviction paths that use the execbuf reservation
utilities for convenience rather than for deadlock avoidance.
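
As an illustration only, a minimal sketch of how an eviction path might
use the NULL-ticket mode; the helper name my_driver_evict_buffers and
the error-handling comments are assumptions, not part of this patch:

/*
 * Hypothetical eviction helper (illustration only, not part of this
 * patch).  With a NULL ticket, ttm_eu_reserve_buffers() reserves with
 * trylock semantics, so a contended buffer fails the whole reserve
 * instead of blocking or relying on ww_mutex deadlock handling.
 * The list holds struct ttm_validate_buffer entries.
 */
static int my_driver_evict_buffers(struct list_head *list)
{
        int ret;

        ret = ttm_eu_reserve_buffers(NULL, list);
        if (ret)
                return ret;     /* typically -EBUSY on contention */

        /* ... move / evict the now-reserved buffers here ... */

        ttm_eu_backoff_reservation(NULL, list);
        return 0;
}

ttm_eu_fence_buffer_objects() gets the same NULL-ticket treatment below,
so a path that fences the buffers instead of backing off can follow the
same convention.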

Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |   32 +++++++++++++++++++-------------
 include/drm/ttm/ttm_execbuf_util.h     |    3 ++-
 2 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c91178..479e941 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
-					      struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list, ticket);
-	ww_acquire_fini(ticket);
+	ttm_eu_backoff_reservation_locked(list);
+	if (ticket)
+		ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-	ww_acquire_init(ticket, &reservation_ww_class);
+	if (ticket)
+		ww_acquire_init(ticket, &reservation_ww_class);
 retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
 		if (entry->reserved)
 			continue;
 
-
-		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+		ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+					   ticket);
 
 		if (ret == -EDEADLK) {
 			/* uh oh, we lost out, drop every reservation and try
 			 * to only reserve this buffer, then start over if
 			 * this succeeds.
 			 */
+			BUG_ON(ticket == NULL);
 			spin_lock(&glob->lru_lock);
-			ttm_eu_backoff_reservation_locked(list, ticket);
+			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
 			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
 		}
 	}
 
-	ww_acquire_done(ticket);
+	if (ticket)
+		ww_acquire_done(ticket);
 	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
 
 err:
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list, ticket);
+	ttm_eu_backoff_reservation_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
 err_fini:
-	ww_acquire_done(ticket);
-	ww_acquire_fini(ticket);
+	if (ticket) {
+		ww_acquire_done(ticket);
+		ww_acquire_fini(ticket);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
-	ww_acquire_fini(ticket);
+	if (ticket)
+		ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index ec8a1d3..16db7d0 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 /**
  * function ttm_eu_reserve_buffers
  *
- * @ticket:  [out] ww_acquire_ctx returned by call.
+ * @ticket:  [out] ww_acquire_ctx filled in by call, or NULL if only
+ *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
-- 
1.7.10.4

