[PATCH 2/2] drm/i915/HAX: Allow dead vm to unbind vmas without lock.
Maarten Lankhorst
maarten.lankhorst at linux.intel.com
Thu Jan 27 16:03:03 UTC 2022

Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b959e904c4d3..28b2b021dade 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -40,6 +40,18 @@
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+	/*
+	 * We may be forced to unbind when the vm is dead, to clean it up.
+	 * This is the only exception to the requirement of the object lock
+	 * being held.
+	 */
+	if (IS_ENABLED(CONFIG_LOCKDEP) &&
+	    (atomic_read(&vma->vm->open) || !lockdep_is_held(&vma->vm->mutex)) &&
+	    kref_read(&vma->obj->base.refcount) > 0)
+		assert_object_held(vma->obj);
+}
 static struct kmem_cache *slab_vmas;
 
 static struct i915_vma *i915_vma_alloc(void)
@@ -1779,7 +1791,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 	struct dma_fence *unbind_fence;
 
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* Force a pagefault for domain tracking on next user access */
@@ -1846,7 +1858,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	int ret;
 
 	lockdep_assert_held(&vma->vm->mutex);
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
--
2.34.1
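
For readers who want the gist of the new check outside the i915 tree, below is a minimal,
self-contained C sketch of the same pattern: the object lock is only asserted while the vm is
still open (or its mutex is not held for teardown) and the object itself is still referenced,
so the dead-vm cleanup path may unbind without the object lock. Every name in the sketch
(toy_vm, toy_obj, toy_vma, the bool "lock held" fields) is an illustrative stand-in, not the
driver's real types or locking primitives.

/* Illustrative stand-ins only; the real driver uses struct i915_vma,
 * dma-resv locking and lockdep annotations instead of these fields. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct toy_vm {
	atomic_int open;	/* > 0 while userspace can still use the vm */
	bool mutex_held;	/* stand-in for lockdep_is_held(&vm->mutex) */
};

struct toy_obj {
	atomic_int refcount;	/* stand-in for the GEM object refcount */
	bool resv_held;		/* stand-in for the object (dma-resv) lock */
};

struct toy_vma {
	struct toy_vm *vm;
	struct toy_obj *obj;
};

/*
 * Require the object lock for eviction, except when a dead vm is cleaning
 * up its vmas under its own mutex, or the object is already being freed.
 */
static void toy_assert_vma_held_evict(const struct toy_vma *vma)
{
	if ((atomic_load(&vma->vm->open) || !vma->vm->mutex_held) &&
	    atomic_load(&vma->obj->refcount) > 0)
		assert(vma->obj->resv_held);
}

int main(void)
{
	struct toy_vm vm = { .open = 0, .mutex_held = true };
	struct toy_obj obj = { .refcount = 1, .resv_held = false };
	struct toy_vma vma = { .vm = &vm, .obj = &obj };

	/* Dead vm, vm mutex held: no object lock needed, assert stays quiet. */
	toy_assert_vma_held_evict(&vma);

	/* Live vm: the object lock must now be held. */
	atomic_store(&vm.open, 1);
	obj.resv_held = true;
	toy_assert_vma_held_evict(&vma);

	return 0;
}

Building and running the sketch (e.g. cc -std=c11 toy.c && ./a.out) exercises both paths: the
dead-vm case, where the assertion is skipped, and the live-vm case, where the object lock must
be held for the check to pass.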