[PATCH 44/44] drm/i915: Remove second level open-coded rcu work

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 26 11:20:15 UTC 2019

Now that i915_globals_park() itself defers shrinking the global slab
caches until after an RCU grace period, the open-coded two-stage
equivalent in the GEM idle worker (call_rcu() followed by queue_work()
from the RCU callback) is redundant. Remove it and call
i915_globals_park() directly when parking the GT, dropping the epoch
plumbing through __i915_gem_park() that existed only to validate that
deferral.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 91 +++++----------------------------
 1 file changed, 12 insertions(+), 79 deletions(-)
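
For reference, the open-coded call_rcu() -> queue_work() chain removed
below is the pattern that the generic rcu_work helper in
<linux/workqueue.h> encapsulates: a work item queued with
queue_rcu_work() runs only after an RCU grace period has elapsed. A
minimal sketch of that helper in use (whether i915_globals_park()
builds on it internally is an assumption here, not something this
patch shows; park_sketch, shrink_fn and shrink_rwork are illustrative
names):

	#include <linux/workqueue.h>

	static struct rcu_work shrink_rwork;

	static void shrink_fn(struct work_struct *work)
	{
		/* e.g. kmem_cache_shrink() each global slab cache */
	}

	static void park_sketch(void)
	{
		INIT_RCU_WORK(&shrink_rwork, shrink_fn);
		/* shrink_fn() runs on system_wq only after a grace period */
		queue_rcu_work(system_wq, &shrink_rwork);
	}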

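The same_epoch() test removed below existed to catch the device waking
up (and going busy again) while the deferred shrink slept; a stale work
item would then observe a newer epoch and bail out rather than shrink
caches that are back in use. A hypothetical sketch of that guard folded
into an rcu_work (all names, and the idea that i915_globals_park()
carries an equivalent guard internally, are illustrative assumptions):

	#include <linux/atomic.h>
	#include <linux/workqueue.h>

	static struct rcu_work guarded_rwork;
	static atomic_t activity_epoch;
	static int parked_epoch;

	static void guarded_shrink_fn(struct work_struct *work)
	{
		/* Bail if the device woke while we waited out the grace period. */
		if (parked_epoch != atomic_read(&activity_epoch))
			return;
		/* ... shrink the global caches ... */
	}

	static void park_guarded_sketch(void)
	{
		parked_epoch = atomic_read(&activity_epoch);
		INIT_RCU_WORK(&guarded_rwork, guarded_shrink_fn);
		queue_rcu_work(system_wq, &guarded_rwork);
	}

	static void unpark_sketch(void)
	{
		atomic_inc(&activity_epoch); /* invalidates any pending shrink */
	}
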
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 22e2da3dd02e..c778a352705c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -101,7 +101,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 	spin_unlock(&dev_priv->mm.object_stat_lock);
 }
 
-static u32 __i915_gem_park(struct drm_i915_private *i915)
+static void __i915_gem_park(struct drm_i915_private *i915)
 {
 	intel_wakeref_t wakeref;
 
@@ -112,7 +112,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
 
 	if (!i915->gt.awake)
-		return I915_EPOCH_INVALID;
+		return;
 
 	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
 
@@ -143,7 +143,15 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
-	return i915->gt.epoch;
+	/*
+	 * When we are idle, it is an opportune time to reap our caches.
+	 * However, we have many objects that utilise RCU and the ordered
+	 * i915->wq that this work is executing on. To try and flush any
+	 * pending frees now we are idle, we first wait for an RCU grace
+	 * period, and then queue a task (that will run last on the wq) to
+	 * shrink and re-optimize the caches.
+	 */
+	i915_globals_park();
 }
 
 void i915_gem_park(struct drm_i915_private *i915)
@@ -2877,62 +2885,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
 				   round_jiffies_up_relative(HZ));
 }
 
-static void shrink_caches(struct drm_i915_private *i915)
-{
-	/*
-	 * kmem_cache_shrink() discards empty slabs and reorders partially
-	 * filled slabs to prioritise allocating from the mostly full slabs,
-	 * with the aim of reducing fragmentation.
-	 */
-	i915_globals_park();
-}
-
-struct sleep_rcu_work {
-	union {
-		struct rcu_head rcu;
-		struct work_struct work;
-	};
-	struct drm_i915_private *i915;
-	unsigned int epoch;
-};
-
-static inline bool
-same_epoch(struct drm_i915_private *i915, unsigned int epoch)
-{
-	/*
-	 * There is a small chance that the epoch wrapped since we started
-	 * sleeping. If we assume that epoch is at least a u32, then it will
-	 * take at least 2^32 * 100ms for it to wrap, or about 326 years.
-	 */
-	return epoch == READ_ONCE(i915->gt.epoch);
-}
-
-static void __sleep_work(struct work_struct *work)
-{
-	struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
-	struct drm_i915_private *i915 = s->i915;
-	unsigned int epoch = s->epoch;
-
-	kfree(s);
-	if (same_epoch(i915, epoch))
-		shrink_caches(i915);
-}
-
-static void __sleep_rcu(struct rcu_head *rcu)
-{
-	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
-	struct drm_i915_private *i915 = s->i915;
-
-	destroy_rcu_head(&s->rcu);
-
-	if (same_epoch(i915, s->epoch)) {
-		INIT_WORK(&s->work, __sleep_work);
-		queue_work(i915->wq, &s->work);
-	} else {
-		kfree(s);
-	}
-}
-
 static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
 					  unsigned long mask)
 {
@@ -2971,7 +2923,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	struct drm_i915_private *i915 =
 		container_of(work, typeof(*i915), gt.idle_work.work);
 	typeof(i915->gt) *gt = &i915->gt;
-	unsigned int epoch = I915_EPOCH_INVALID;
 	bool rearm_hangcheck;
 
 	if (!READ_ONCE(gt->awake))
@@ -3010,7 +2961,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		i915_retire_requests(i915);
 
 		if (!--gt->active_requests) {
-			epoch = __i915_gem_park(i915);
+			__i915_gem_park(i915);
 			rearm_hangcheck = false;
 		}
 	}
@@ -3022,24 +2973,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		GEM_BUG_ON(!gt->awake);
 		i915_queue_hangcheck(i915);
 	}
-
-	/*
-	 * When we are idle, it is an opportune time to reap our caches.
-	 * However, we have many objects that utilise RCU and the ordered
-	 * i915->wq that this work is executing on. To try and flush any
-	 * pending frees now we are idle, we first wait for an RCU grace
-	 * period, and then queue a task (that will run last on the wq) to
-	 * shrink and re-optimize the caches.
-	 */
-	if (same_epoch(i915, epoch)) {
-		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
-		if (s) {
-			init_rcu_head(&s->rcu);
-			s->i915 = i915;
-			s->epoch = epoch;
-			call_rcu(&s->rcu, __sleep_rcu);
-		}
-	}
 }
 
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
-- 
2.20.1


