[PATCH 41/74] drm/i915: Keep a recent cache of freed context objects for reuse

Chris Wilson <chris@chris-wilson.co.uk>
Sun Jul 16 19:00:42 UTC 2017


Keep recently freed context objects around for reuse. This allows us to
reuse their existing GGTT bindings and dma-bound pages, avoiding the
clflushes that would otherwise be required when reallocating. We mark
the cached objects as purgeable so they can be reclaimed under memory
pressure, and reap the list of freed objects as soon as the device is
idle.
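The scheme amounts to a size-matched free cache sitting in front of the
allocator. A minimal userspace sketch of the idea (hypothetical names
and plain malloc/free rather than the driver API, and without the
madvise bookkeeping or struct_mutex locking the real code relies on):

  #include <stddef.h>
  #include <stdlib.h>

  struct cached_buf {
          struct cached_buf *next;
          size_t size;
          void *data;
  };

  /* Mirrors i915->contexts.freed_objects: buffers parked for reuse. */
  static struct cached_buf *freed_objects;

  /* Reuse a previously freed buffer of the same size, else allocate. */
  static void *cache_create(size_t size)
  {
          struct cached_buf **p = &freed_objects;

          while (*p) {
                  if ((*p)->size == size) {
                          struct cached_buf *hit = *p;
                          void *data = hit->data;

                          *p = hit->next; /* unlink the cache entry */
                          free(hit);
                          return data;
                  }
                  p = &(*p)->next;
          }

          return malloc(size);
  }

  /* Instead of freeing, park the buffer on the cache. */
  static void cache_free(void *data, size_t size)
  {
          struct cached_buf *buf = malloc(sizeof(*buf));

          if (!buf) {
                  free(data);
                  return;
          }

          buf->size = size;
          buf->data = data;
          buf->next = freed_objects;
          freed_objects = buf;
  }

  /* On idle, reap everything still sitting in the cache. */
  static void cache_mark_idle(void)
  {
          struct cached_buf *buf, *next;

          for (buf = freed_objects; buf; buf = next) {
                  next = buf->next;
                  free(buf->data);
                  free(buf);
          }

          freed_objects = NULL;
  }

In the patch itself the cache entries are the context state objects
themselves, threaded onto contexts.freed_objects via their
batch_pool_link, marked I915_MADV_DONTNEED while cached (so the shrinker
may still purge them), and reaped from the idle worker and from
i915_gem_contexts_fini().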

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h                  |  2 +
 drivers/gpu/drm/i915/i915_gem.c                  |  1 +
 drivers/gpu/drm/i915/i915_gem_context.c          | 59 ++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_context.h          |  5 ++
 drivers/gpu/drm/i915/intel_lrc.c                 |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c          |  2 +-
 drivers/gpu/drm/i915/selftests/mock_context.c    |  1 +
 drivers/gpu/drm/i915/selftests/mock_gem_device.c |  1 +
 8 files changed, 68 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e789dc513ef..b485c826e9b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2335,6 +2335,8 @@ struct drm_i915_private {
 		struct llist_head free_list;
 		struct work_struct free_work;
 
+		struct list_head freed_objects;
+
 		/* The hw wants to have a stable context identifier for the
 		 * lifetime of the context (for OA, PASID, faults, etc).
 		 * This is limited in execlists to 21 bits.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e63f06397f7..5a36ea6dc36a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3340,6 +3340,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		DRM_ERROR("Timeout waiting for engines to idle\n");
 
 	intel_engines_mark_idle(dev_priv);
+	i915_gem_contexts_mark_idle(dev_priv);
 	i915_gem_timelines_mark_idle(dev_priv);
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 846ee3fff71e..b56f1392eb0c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -93,6 +93,48 @@
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
+struct drm_i915_gem_object *
+i915_gem_context_create_object(struct drm_i915_private *i915,
+			       unsigned long size)
+{
+	struct drm_i915_gem_object *obj, *on;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	list_for_each_entry_safe(obj, on,
+				 &i915->contexts.freed_objects,
+				 batch_pool_link) {
+		if (obj->mm.madv != I915_MADV_DONTNEED) {
+			/* Purge the heretic! */
+			list_del(&obj->batch_pool_link);
+			i915_gem_object_put(obj);
+			continue;
+		}
+
+		if (obj->base.size == size) {
+			list_del(&obj->batch_pool_link);
+			obj->mm.madv = I915_MADV_WILLNEED;
+			return obj;
+		}
+	}
+
+	return i915_gem_object_create(i915, size);
+}
+
+void i915_gem_contexts_mark_idle(struct drm_i915_private *i915)
+{
+	struct drm_i915_gem_object *obj, *on;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	list_for_each_entry_safe(obj, on,
+				 &i915->contexts.freed_objects,
+				 batch_pool_link) {
+		list_del(&obj->batch_pool_link);
+		i915_gem_object_put(obj);
+	}
+}
+
 static void lut_close(struct i915_gem_context *ctx)
 {
 	struct i915_lut_handle *lut, *ln;
@@ -112,9 +154,10 @@ static void lut_close(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
+	struct drm_i915_private *i915 = ctx->i915;
 	int i;
 
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
 	i915_ppgtt_put(ctx->ppgtt);
@@ -129,7 +172,11 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 		if (ce->ring)
 			intel_ring_free(ce->ring);
 
-		__i915_gem_object_release_unless_active(ce->state->obj);
+		/* Keep the objects around for quick reuse */
+		GEM_BUG_ON(ce->state->obj->mm.madv != I915_MADV_WILLNEED);
+		ce->state->obj->mm.madv = I915_MADV_DONTNEED;
+		list_add(&ce->state->obj->batch_pool_link,
+			 &i915->contexts.freed_objects);
 	}
 
 	kfree(ctx->name);
@@ -137,7 +184,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 
 	list_del(&ctx->link);
 
-	ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
+	ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
 	kfree_rcu(ctx, rcu);
 }
 
@@ -419,6 +466,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 		return 0;
 
 	INIT_LIST_HEAD(&dev_priv->contexts.list);
+	INIT_LIST_HEAD(&dev_priv->contexts.freed_objects);
 	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
 	init_llist_head(&dev_priv->contexts.free_list);
 
@@ -504,12 +552,17 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
+	GEM_BUG_ON(work_pending(&i915->contexts.free_work));
+
 	/* Keep the context so that we can free it immediately ourselves */
 	ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 	context_close(ctx);
 	i915_gem_context_free(ctx);
 
+	i915_gem_contexts_mark_idle(i915);
+	GEM_BUG_ON(!list_empty(&i915->contexts.freed_objects));
+
 	/* Must free all deferred contexts (via flush_workqueue) first */
 	ida_destroy(&i915->contexts.hw_ida);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 44688e22a5c2..baa111749c85 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -281,6 +281,11 @@ void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
 i915_gem_context_create_gvt(struct drm_device *dev);
 
+struct drm_i915_gem_object *
+i915_gem_context_create_object(struct drm_i915_private *i915,
+			       unsigned long size);
+void i915_gem_contexts_mark_idle(struct drm_i915_private *i915);
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ad61d1998fb7..31ce26d0ce96 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2026,7 +2026,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	 */
 	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
-	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+	ctx_obj = i915_gem_context_create_object(ctx->i915, context_size);
 	if (IS_ERR(ctx_obj)) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return PTR_ERR(ctx_obj);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b2580539051e..85eca0d9670b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1348,7 +1348,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
-	obj = i915_gem_object_create(i915, engine->context_size);
+	obj = i915_gem_context_create_object(i915, engine->context_size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 098ce643ad07..082202e70ddc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -85,6 +85,7 @@ void mock_init_contexts(struct drm_i915_private *i915)
 	INIT_LIST_HEAD(&i915->contexts.list);
 	ida_init(&i915->contexts.hw_ida);
 
+	INIT_LIST_HEAD(&i915->contexts.freed_objects);
 	INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
 	init_llist_head(&i915->contexts.free_list);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 4beed89b51e6..9344289daa5d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -53,6 +53,7 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_device_flush(i915);
+	i915_gem_contexts_lost(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	cancel_delayed_work_sync(&i915->gt.retire_work);
-- 
2.13.2


