[RFC PATCH 154/162] drm/i915: Support ww eviction

Matthew Auld matthew.auld at intel.com
Fri Nov 27 12:07:10 UTC 2020


From: Thomas Hellström <thomas.hellstrom at intel.com>

Use sleeping ww locks if we're in a ww transaction.
Trylock otherwise.
We unlock the evicted objects either when eviction failed or
when we've reached the target. The ww ticket locks will then
ensure that we eventually succeed in reaching the target if there
is evictable space available. However another process may still
steal the evicted memory before we have a chance to allocate it.
To ensure that we eventually succeed, we need to move the evict unlock
until after get_pages succeeds. That's considered a TODO for now.

Signed-off-by: Thomas Hellström <thomas.hellstrom at intel.com>
Cc: Matthew Auld <matthew.auld at intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_region.c |  7 ++-
 drivers/gpu/drm/i915/intel_memory_region.c | 57 ++++++++++++++++------
 drivers/gpu/drm/i915/intel_memory_region.h |  2 +
 3 files changed, 49 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1ec6528498c8..8ec59fbaa3e6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -204,6 +204,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
 	int ret;
+	struct i915_gem_ww_ctx *ww = i915_gem_get_locking_ctx(obj);
 
 	/* XXX: Check if we have any post. This is nasty hack, see gem_create */
 	if (obj->mm.gem_create_posted_err)
@@ -222,7 +223,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
 		flags |= I915_ALLOC_CONTIGUOUS;
 
-	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+	ret = __intel_memory_region_get_pages_buddy(mem, ww, size, flags,
+						    blocks);
 	if (ret)
 		goto err_free_sg;
 
@@ -277,7 +279,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 		if (ret) {
 			/* swapin failed, free the pages */
 			__intel_memory_region_put_pages_buddy(mem, blocks);
-			ret = -ENXIO;
+			if (ret != -EDEADLK && ret != -EINTR)
+				ret = -ENXIO;
 			goto err_free_sg;
 		}
 	} else if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 57f01ef16628..6b26b6cd5958 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -96,6 +96,7 @@ __intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
 }
 
 static int intel_memory_region_evict(struct intel_memory_region *mem,
+				     struct i915_gem_ww_ctx *ww,
 				     resource_size_t target)
 {
 	struct drm_i915_private *i915 = mem->i915;
@@ -109,6 +110,7 @@ static int intel_memory_region_evict(struct intel_memory_region *mem,
 	struct list_head **phase;
 	resource_size_t found;
 	int pass;
+	int err = 0;
 
 	intel_gt_retire_requests(&i915->gt);
 
@@ -126,10 +128,11 @@ static int intel_memory_region_evict(struct intel_memory_region *mem,
 						mm.region_link))) {
 		list_move_tail(&obj->mm.region_link, &still_in_list);
 
-		if (!i915_gem_object_has_pages(obj))
+		if (i915_gem_object_is_framebuffer(obj))
 			continue;
 
-		if (i915_gem_object_is_framebuffer(obj))
+		/* Already locked this object? */
+		if (ww && ww == i915_gem_get_locking_ctx(obj))
 			continue;
 
 		/*
@@ -147,34 +150,51 @@ static int intel_memory_region_evict(struct intel_memory_region *mem,
 
 		mutex_unlock(&mem->objects.lock);
 
+		if (ww) {
+			err = i915_gem_object_lock_to_evict(obj, ww);
+			if (err)
+				goto put;
+		} else {
+			if (!i915_gem_object_trylock(obj))
+				goto put;
+		}
+
+		if (!i915_gem_object_has_pages(obj))
+			goto unlock;
+
 		/* tell callee to do swapping */
 		if (i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)
 		    && pass == 1)
 			obj->do_swapping = true;
 
 		if (!i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE)) {
-			if (i915_gem_object_trylock(obj)) {
-				__i915_gem_object_put_pages(obj);
-				/* May arrive from get_pages on another bo */
-				if (!i915_gem_object_has_pages(obj)) {
-					found += obj->base.size;
-					if (obj->mm.madv == I915_MADV_DONTNEED)
-						obj->mm.madv = __I915_MADV_PURGED;
-				}
-				i915_gem_object_unlock(obj);
+			__i915_gem_object_put_pages(obj);
+			/* May arrive from get_pages on another bo */
+
+			if (!i915_gem_object_has_pages(obj)) {
+				found += obj->base.size;
+				if (obj->mm.madv == I915_MADV_DONTNEED)
+					obj->mm.madv = __I915_MADV_PURGED;
 			}
 		}
 
 		obj->do_swapping = false;
+unlock:
+		if (!ww)
+			i915_gem_object_unlock(obj);
+put:
 		i915_gem_object_put(obj);
 		mutex_lock(&mem->objects.lock);
 
-		if (found >= target)
+		if (err == -EDEADLK || err == -EINTR || found >= target)
 			break;
 	}
 	list_splice_tail(&still_in_list, *phase);
 	mutex_unlock(&mem->objects.lock);
 
+	if (err == -EDEADLK || err == -EINTR)
+		return err;
+
 	if (found < target && i915->params.enable_eviction) {
 		pass++;
 		phase++;
@@ -182,11 +202,15 @@ static int intel_memory_region_evict(struct intel_memory_region *mem,
 			goto next;
 	}
 
+	if (ww)
+		i915_gem_ww_ctx_unlock_evictions(ww);
+
 	return (found < target) ? -ENOSPC : 0;
 }
 
 int
 __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+				      struct i915_gem_ww_ctx *ww,
 				      resource_size_t size,
 				      unsigned int flags,
 				      struct list_head *blocks)
@@ -194,6 +218,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 	unsigned int min_order = 0;
 	unsigned int max_order;
 	unsigned long n_pages;
+	int err;
 
 	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
 	GEM_BUG_ON(!list_empty(blocks));
@@ -241,12 +266,11 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 
 			if (order-- == min_order) {
 				resource_size_t target;
-				int err;
 
 				target = n_pages * mem->mm.chunk_size;
 
 				mutex_unlock(&mem->mm_lock);
-				err = intel_memory_region_evict(mem,
+				err = intel_memory_region_evict(mem, ww,
 								target);
 				mutex_lock(&mem->mm_lock);
 				if (err)
@@ -272,6 +296,9 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 err_free_blocks:
 	intel_memory_region_free_pages(mem, blocks);
 	mutex_unlock(&mem->mm_lock);
+	if (err == -EDEADLK || err == -EINTR)
+		return err;
+
 	return -ENXIO;
 }
 
@@ -284,7 +311,7 @@ __intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
 	LIST_HEAD(blocks);
 	int ret;
 
-	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
+	ret = __intel_memory_region_get_pages_buddy(mem, NULL, size, flags, &blocks);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 0bfc1fa36f74..ff1d97667618 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -16,6 +16,7 @@
 
 #include "i915_buddy.h"
 
+struct i915_gem_ww_ctx;
 struct drm_i915_private;
 struct drm_i915_gem_object;
 struct intel_memory_region;
@@ -116,6 +117,7 @@ int intel_memory_region_init_buddy(struct intel_memory_region *mem);
 void intel_memory_region_release_buddy(struct intel_memory_region *mem);
 
 int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+					  struct i915_gem_ww_ctx *ww,
 					  resource_size_t size,
 					  unsigned int flags,
 					  struct list_head *blocks);
-- 
2.26.2



More information about the dri-devel mailing list