[PATCH 16/21] drm/i915: Take object reference and trylock during eviction
Maarten Lankhorst
maarten.lankhorst at linux.intel.com
Mon Oct 4 07:30:02 UTC 2021
We want to add locking to i915_vma_unbind, but this requires a valid
object. Skip dead or busy objects when evicting.
This unfortunately makes delayed-destroyed objects unevictable,
but this will hopefully be fixed at a later date, when moving to
TTM-style delayed destroy.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 14 +++-
drivers/gpu/drm/i915/i915_gem_evict.c | 67 ++++++++++++++++++--
2 files changed, 73 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 7f7849b6296d..71e855fcbd8c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -404,12 +404,22 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
list_for_each_entry_safe(vma, next,
&i915->ggtt.vm.bound_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj;
if (!vma->iomap || i915_vma_is_active(vma))
continue;
- if (__i915_vma_unbind(vma) == 0)
- freed_pages += count;
+ obj = vma->obj;
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ if (i915_gem_object_trylock(obj)) {
+ if (__i915_vma_unbind(vma) == 0)
+ freed_pages += count;
+
+ i915_gem_object_unlock(obj);
+ }
+ i915_gem_object_put(obj);
}
mutex_unlock(&i915->ggtt.vm.mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2b73ddb11c66..a73345b80efe 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -58,6 +58,14 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
+ if (!kref_get_unless_zero(&vma->obj->base.refcount))
+ return false;
+
+ if (!i915_gem_object_trylock(vma->obj)) {
+ i915_gem_object_put(vma->obj);
+ return false;
+ }
+
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -178,6 +186,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
+ i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
}
/*
@@ -222,10 +232,13 @@ i915_gem_evict_something(struct i915_address_space *vm,
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
- if (drm_mm_scan_remove_block(&scan, &vma->node))
+ if (drm_mm_scan_remove_block(&scan, &vma->node)) {
__i915_vma_pin(vma);
- else
+ } else {
list_del(&vma->evict_link);
+ i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
+ }
}
/* Unbinding will emit any required flushes */
@@ -234,16 +247,28 @@ i915_gem_evict_something(struct i915_address_space *vm,
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
+
+ i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
+
/* If we find any non-objects (!vma), we cannot evict them */
- if (vma->node.color != I915_COLOR_UNEVICTABLE)
- ret = __i915_vma_unbind(vma);
- else
- ret = -ENOSPC; /* XXX search failed, try again? */
+ if (vma->node.color != I915_COLOR_UNEVICTABLE &&
+ kref_get_unless_zero(&vma->obj->base.refcount)) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (i915_gem_object_trylock(obj)) {
+ ret = __i915_vma_unbind(vma);
+ i915_gem_object_unlock(obj);
+ } else
+ ret = -ENOSPC;
+ i915_gem_object_put(obj);
+ } else
+ ret = -ENOSPC;
}
return ret;
@@ -333,6 +358,17 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
+ if (!kref_get_unless_zero(&vma->obj->base.refcount)) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ if (!i915_gem_object_trylock(vma->obj)) {
+ ret = -ENOSPC;
+ i915_gem_object_put(vma->obj);
+ break;
+ }
+
/*
* Never show fear in the face of dragons!
*
@@ -347,9 +383,14 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
+
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
}
return ret;
@@ -393,6 +434,14 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
if (i915_vma_is_pinned(vma))
continue;
+ if (!kref_get_unless_zero(&vma->obj->base.refcount))
+ continue;
+
+ if (!i915_gem_object_trylock(vma->obj)) {
+ i915_gem_object_put(vma->obj);
+ continue;
+ }
+
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
@@ -401,11 +450,17 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
ret = 0;
list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
__i915_vma_unpin(vma);
+
if (ret == 0)
ret = __i915_vma_unbind(vma);
if (ret != -EINTR) /* "Get me out of here!" */
ret = 0;
+
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
}
} while (ret == 0);
--
2.33.0
More information about the Intel-gfx-trybot
mailing list