[PATCH 08/11] drm/i915: try to simplify make_{un}shrinkable
Matthew Auld
matthew.auld at intel.com
Fri Sep 3 14:42:05 UTC 2021
Drop the shrink_pin stuff, and just have make_{un}shrinkable update the
shrinker visible lists immediately. This at least simplifies the next
patch. The downside is that make_unshrinkable, which can be called from
multiple places, now grabs a global lock even when the object is no
longer shrinkable. There is some fallout in intel_context, which relied on
nesting calls to shrink_pin. And in the ppGTT code path we don't need
to call make_unshrinkable multiple times, especially since this now
always grabs a global lock.
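For reference, below is a minimal userspace model of the new behaviour
(hypothetical names; a pthread mutex stands in for i915->mm.obj_lock and a
hand-rolled list for mm.shrink_list, this is not the real i915 API). The
point is that the per-object shrink_pin counter goes away and both helpers
simply fix up list membership under the one global lock:

/*
 * Simplified, hypothetical model of the new make_{un}shrinkable flow;
 * not the real i915 API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	struct obj *prev, *next;	/* points to itself when off-list */
	bool shrinkable;		/* i915_gem_object_is_shrinkable() */
	unsigned long size;
};

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj shrink_list = { &shrink_list, &shrink_list, false, 0 };
static unsigned long shrink_count, shrink_memory;

static bool on_list(const struct obj *o) { return o->next != o; }

static void list_del_init(struct obj *o)
{
	o->prev->next = o->next;
	o->next->prev = o->prev;
	o->prev = o->next = o;
}

static void list_add_tail(struct obj *o, struct obj *head)
{
	o->prev = head->prev;
	o->next = head;
	head->prev->next = o;
	head->prev = o;
}

/* New make_unshrinkable: no nested pin counter, just drop off the list. */
static void make_unshrinkable(struct obj *o)
{
	if (!o->shrinkable)
		return;

	pthread_mutex_lock(&obj_lock);	/* always taken, the downside noted above */
	if (on_list(o)) {
		list_del_init(o);
		shrink_count--;
		shrink_memory -= o->size;
	}
	pthread_mutex_unlock(&obj_lock);
}

/* New make_shrinkable: add to (or rotate to) the tail of the LRU. */
static void make_shrinkable(struct obj *o)
{
	if (!o->shrinkable)
		return;

	pthread_mutex_lock(&obj_lock);
	if (!on_list(o)) {
		shrink_count++;
		shrink_memory += o->size;
	} else {
		list_del_init(o);
	}
	list_add_tail(o, &shrink_list);
	pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
	struct obj o = { &o, &o, true, 4096 };

	make_shrinkable(&o);
	make_unshrinkable(&o);
	make_unshrinkable(&o);	/* idempotent, unlike the old nested pin */
	printf("count=%lu memory=%lu\n", shrink_count, shrink_memory);
	return 0;
}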
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.c | 9 ----
.../gpu/drm/i915/gem/i915_gem_object_types.h | 1 -
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 16 +-----
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 53 +++++++++++++------
drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 1 -
drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 1 -
drivers/gpu/drm/i915/gt/intel_context.c | 9 +---
7 files changed, 41 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 6fb9afb65034..e8265a432fcb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -305,15 +305,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
atomic_inc(&i915->mm.free_count);
- /*
- * This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- i915_gem_object_make_unshrinkable(obj);
-
/*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2471f36aaff3..041b2b3e77eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -453,7 +453,6 @@ struct drm_i915_gem_object {
* instead go through the pin/unpin interfaces.
*/
atomic_t pages_pin_count;
- atomic_t shrink_pin;
/**
* Priority list of potential placements for this object.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8eb1c3a6fc9c..f0df1394d7f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -64,28 +64,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
GEM_BUG_ON(!list_empty(&obj->mm.link));
- atomic_inc(&obj->mm.shrink_pin);
shrinkable = false;
}
if (shrinkable) {
- struct list_head *list;
- unsigned long flags;
-
assert_object_held(obj);
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- i915->mm.shrink_count++;
- i915->mm.shrink_memory += obj->base.size;
if (obj->mm.madv != I915_MADV_WILLNEED)
- list = &i915->mm.purge_list;
+ i915_gem_object_make_purgeable(obj);
else
- list = &i915->mm.shrink_list;
- list_add_tail(&obj->mm.link, list);
-
- atomic_set(&obj->mm.shrink_pin, 0);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ i915_gem_object_make_shrinkable(obj);
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index e382b7f2353b..11845eb311df 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -455,23 +455,25 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+/**
+ * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
+ * default all object backends that support shrinking (IS_SHRINKABLE) will make
+ * the object visible to the shrinker after allocating the system memory pages.
+ * @obj: The GEM object.
+ *
+ * This is typically used for special kernel internal objects that can't be
+ * easily processed by the shrinker, like if they are perma-pinned.
+ */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
- /*
- * We can only be called while the pages are pinned or when
- * the pages are released. If pinned, we should only be called
- * from a single caller under controlled conditions; and on release
- * only one caller may release us. Neither the two may cross.
- */
- if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+ if (!i915_gem_object_is_shrinkable(obj))
return;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
- !list_empty(&obj->mm.link)) {
+ if (!list_empty(&obj->mm.link)) {
list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
@@ -489,28 +491,47 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
if (!i915_gem_object_is_shrinkable(obj))
return;
- if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
- return;
-
spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
GEM_BUG_ON(!kref_read(&obj->base.refcount));
- if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
- GEM_BUG_ON(!list_empty(&obj->mm.link));
- list_add_tail(&obj->mm.link, head);
+ if (list_empty(&obj->mm.link)) {
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
-
+ list_add_tail(&obj->mm.link, head);
+ } else {
+ list_move_tail(&obj->mm.link, head);
}
+
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+
+/**
+ * i915_gem_object_make_shrinkable - Move the object to the tail of the
+ * shrinkable LRU list. Objects that are no longer pinned or active can be
+ * swapped out by the shrinker. This should only be used with WILLNEED
+ * objects.
+ * @obj: The GEM object.
+ *
+ * Should only be called on objects which have backing pages.
+ */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
__i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.shrink_list);
}
+/**
+ * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
+ * LRU list. Note that this is typically called with DONTNEED objects. Unlike
+ * with shrinkable objects, the shrinker will attempt to discard the backing
+ * pages immediately, instead of trying to swap them out, if the object is
+ * unpinned and no longer active.
+ * @obj: The GEM object.
+ *
+ * Should only be called on objects which have backing pages.
+ */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
__i915_gem_object_make_shrinkable(obj,
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 1aee5e6b1b23..dd5ebc1659cf 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm,
pt = stash->pt[0];
__i915_gem_object_pin_pages(pt->base);
- i915_gem_object_make_unshrinkable(pt->base);
fill32_px(pt, vm->scratch[0]->encode);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 6e0e52eeb87a..9c92b6606ec1 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
pt = stash->pt[!!lvl];
__i915_gem_object_pin_pages(pt->base);
- i915_gem_object_make_unshrinkable(pt->base);
fill_px(pt, vm->scratch[lvl]->encode);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 745e84c72c90..8e161329dbb9 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -111,11 +111,6 @@ static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
if (err)
goto err_unpin;
- /*
- * And mark it as a globally pinned object to let the shrinker know
- * it cannot reclaim the object until we release it.
- */
- i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;
return 0;
@@ -127,7 +122,6 @@ static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
static void __context_unpin_state(struct i915_vma *vma)
{
- i915_vma_make_shrinkable(vma);
i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
@@ -180,7 +174,6 @@ static int intel_context_pre_pin(struct intel_context *ce,
if (err)
goto err_timeline;
-
return 0;
err_timeline:
@@ -338,6 +331,8 @@ static void __intel_context_retire(struct i915_active *active)
set_bit(CONTEXT_VALID_BIT, &ce->flags);
intel_context_post_unpin(ce);
+ if (ce->state)
+ i915_vma_make_shrinkable(ce->state);
intel_context_put(ce);
}
--
2.26.3