[Intel-gfx] [PATCH 2/2] RFC: drm/i915: Switch obj->mm.lock lockdep annotations on its head
Daniel Vetter
daniel.vetter at ffwll.ch
Wed Aug 14 12:49:33 UTC 2019
The trouble with having a plain nesting flag for locks which do not
naturally nest (unlike block devices and their partitions, which is
the original motivation for nesting levels) is that lockdep will
never spot a true deadlock if you screw up.
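As a reminder of what we lose with a blanket subclass annotation, here is a
minimal, made-up sketch (lock_a/lock_b and the two thread functions are
hypothetical, not i915 code): the two threads can deadlock against each
other, but because thread_b() annotates its second acquisition with a
subclass, lockdep records A:0 -> B:1 and B:0 -> A:0 as edges between
different lock classes and never reports the cycle.

#include <linux/mutex.h>

static DEFINE_MUTEX(lock_a);
static DEFINE_MUTEX(lock_b);

static void thread_a(void)
{
        mutex_lock(&lock_b);
        mutex_lock(&lock_a);            /* lockdep edge: B:0 -> A:0 */
        mutex_unlock(&lock_a);
        mutex_unlock(&lock_b);
}

static void thread_b(void)
{
        mutex_lock(&lock_a);
        /* lockdep edge: A:0 -> B:1, which never closes a cycle with B:0 -> A:0 */
        mutex_lock_nested(&lock_b, SINGLE_DEPTH_NESTING);
        mutex_unlock(&lock_b);
        mutex_unlock(&lock_a);
}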
This patch is an attempt at doing better, by making the actual nature of
the nesting that's going on a bit more explicit. Essentially we have two
kinds of objects:
- objects without pages allocated, which cannot be on any lru and are
hence inaccessible to the shrinker.
- objects which have pages allocated, which are on an lru, and which
the shrinker can decide to throw out.
For the former type of object, memory allocations while holding
obj->mm.lock are permissible. For the latter they are not. And
get/put_pages transitions objects between the two types.
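To spell out the rule, a rough sketch of the two sides follows
(example_get_pages/example_put_pages are illustrative names only, the real
entry points live in i915_gem_pages.c, and all the actual page handling is
elided): only the get_pages side, annotated with I915_MM_GET_PAGES, may
allocate under obj->mm.lock, because at that point the object has no pages
and is invisible to the shrinker; the put_pages side takes the lock plain
and must not allocate, since the shrinker can take the very same lock from
reclaim.

static int example_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        /* No pages yet, so the object is not on any LRU and the shrinker
         * cannot recurse onto this lock: allocating under it is fine. */
        err = mutex_lock_interruptible_nested(&obj->mm.lock,
                                              I915_MM_GET_PAGES);
        if (err)
                return err;

        if (!i915_gem_object_has_pages(obj))
                err = ____i915_gem_object_get_pages(obj); /* may allocate */

        mutex_unlock(&obj->mm.lock);
        return err;
}

static void example_put_pages(struct drm_i915_gem_object *obj)
{
        /* Pages exist, so the object may be on an LRU and the shrinker can
         * take this very lock from direct reclaim: allocating under it can
         * deadlock, hence only teardown is allowed here. */
        mutex_lock(&obj->mm.lock);

        /* release obj->mm.pages here, never allocate */

        mutex_unlock(&obj->mm.lock);
}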
This is still not entirely fool-proof since the rules might change. But
as long as such code ever runs at runtime, lockdep should be able to
observe the inconsistency and complain (like with any other lockdep class
that we've split up into multiple classes). But there are a few clear
benefits:
- We can drop the nesting flag parameter from
__i915_gem_object_put_pages, because that function by definition is
never going to allocate memory, and calling it on an object which
doesn't have its pages allocated would be a bug.
- We strictly catch more bugs, since there's now only one place in the
entire tree which is annotated with the special class. All the other
places that had explicit lockdep nesting annotations are now left up to
lockdep again.
- Specifically this catches stuff like calling get_pages from put_pages
(which isn't really a good idea: if we can call put_pages, so could the
shrinker, see the sketch below this list). I've seen patches do exactly
that.
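To make that last point concrete, here is the dependency chain lockdep can
now build (a sketch under the new annotations; broken_put_pages() is a
hypothetical offender, not code from the tree): get_pages holds obj->mm.lock
in the I915_MM_GET_PAGES subclass while allocating, the allocation can enter
fs_reclaim, and the shrinker takes the plain obj->mm.lock from there. A
put_pages implementation that calls back into get_pages adds the missing
edge from the plain subclass to I915_MM_GET_PAGES and closes the cycle.

static void broken_put_pages(struct drm_i915_gem_object *obj)
{
        int err;

        /* Plain subclass: this is the lock the shrinker takes from direct
         * reclaim, so lockdep already knows fs_reclaim -> obj->mm.lock:0. */
        mutex_lock(&obj->mm.lock);

        /*
         * BAD: re-entering get_pages records obj->mm.lock:0 ->
         * obj->mm.lock:I915_MM_GET_PAGES, and because the GET_PAGES side
         * may allocate (obj->mm.lock:I915_MM_GET_PAGES -> fs_reclaim),
         * lockdep now sees the complete cycle and complains, where the old
         * blanket I915_MM_SHRINKER annotation would have stayed silent.
         */
        err = __i915_gem_object_get_pages(obj);
        (void)err; /* sketch only, error handling elided */

        mutex_unlock(&obj->mm.lock);
}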
Of course I fully expect CI will show me for the fool I am with this
one here :-)
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter at intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.c | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_object.h | 16 +++++++++++++---
drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 10 +++++++++-
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 7 +++----
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +-
drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 ++++++------
7 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 3929c3a6b281..a1a835539e09 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -191,7 +191,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->lut_list));
atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3714cf234d64..3bde9a601eca 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -281,11 +281,21 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
I915_MM_NORMAL = 0,
- I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
+ /*
+ * Only used by struct_mutex, when called "recursively" from
+ * direct-reclaim-esque. Safe because there is only ever one
+ * struct_mutex in the entire system. */
+ I915_MM_SHRINKER,
+ /*
+ * Used for obj->mm.lock when allocating pages. Safe because the object
+ * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+ * it. As soon as the object has pages, obj->mm.lock nests within
+ * fs_reclaim.
+ */
+ I915_MM_GET_PAGES
};
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d474c6ac4100..1ea3c3c96a5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -157,7 +157,15 @@ struct drm_i915_gem_object {
unsigned int pin_global;
struct {
- struct mutex lock; /* protects the pages and their use */
+ /*
+ * Protects the pages and their use.
+ *
+ * IMPORTANT: It is not allowed to allocate memory while holding
+ * this lock, because the shrinker might recurse on it, except
+ * when there are no pages allocated yet and the object isn't
+ * visible on any LRU.
+ */
+ struct mutex lock;
atomic_t pages_pin_count;
struct sg_table *pages;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 18f0ce0135c1..3b7ec6e6ea8b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -101,7 +101,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return err;
@@ -179,8 +179,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
int err;
@@ -191,7 +190,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(atomic_read(&obj->bind_count));
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
+ mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
err = -EBUSY;
goto unlock;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index edd21d14e64f..773f8927929d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -98,7 +98,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
if (i915_gem_object_unbind(obj, flags) == 0)
- __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ __i915_gem_object_put_pages(obj);
return !i915_gem_object_has_pages(obj);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 70dc506a5426..f7475f2c84d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -158,7 +158,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
ret = i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
- ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ ret = __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
if (ret)
goto unlock;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6cbd4a668c9a..df586035c33e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -562,7 +562,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -590,7 +590,7 @@ static void close_object_list(struct list_head *objects,
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -860,7 +860,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1268,7 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1330,7 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -1399,7 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
--
2.22.0