[PATCH 47/84] drm/i915: Split i915_gem_timeline into individual timelines

Chris Wilson <chris@chris-wilson.co.uk>
Sat Feb 3 10:26:00 UTC 2018


We need to move to a more flexible timeline that doesn't assume one
fence context per engine, and so allow for a single timeline to be used
across a combination of engines. This means that preallocating a fence
context per engine is now a hindrance, and we want to introduce the
singular timeline.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
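A quick standalone sketch of the before/after ownership model, for anyone
reviewing the diffstat cold. This is illustrative C only, not driver code:
the structs are pared down to the fields that move, and timeline_create(),
timeline_get(), timeline_put() and the plain refcount field are stand-ins
for the kref and dma_fence_context_alloc() machinery the patch actually
uses.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define I915_NUM_ENGINES 5

/* Before: a parent i915_gem_timeline owned one intel_timeline per engine. */
struct old_gem_timeline;

struct old_intel_timeline {
	uint64_t fence_context;			/* preallocated per engine */
	uint32_t seqno;
	struct old_gem_timeline *common;	/* pointer chase to parent */
};

struct old_gem_timeline {
	const char *name;
	struct old_intel_timeline engine[I915_NUM_ENGINES];
};

/*
 * After: i915_timeline is singular and refcounted. Each engine embeds
 * one, and each ring pins whichever timeline it was created on, so one
 * timeline can serve any combination of engines.
 */
struct i915_timeline {
	uint64_t fence_context;	/* dma_fence_context_alloc(1) in the patch */
	uint32_t seqno;
	const char *name;
	unsigned int refcount;	/* stand-in for struct kref */
};

static uint64_t next_context;	/* stand-in for dma_fence_context_alloc() */

static struct i915_timeline *timeline_create(const char *name)
{
	struct i915_timeline *tl = calloc(1, sizeof(*tl));

	if (!tl)
		return NULL;
	tl->name = name;
	tl->fence_context = next_context++;	/* one context per timeline */
	tl->refcount = 1;
	return tl;
}

static struct i915_timeline *timeline_get(struct i915_timeline *tl)
{
	tl->refcount++;
	return tl;
}

static void timeline_put(struct i915_timeline *tl)
{
	if (!--tl->refcount)
		free(tl);
}

struct ring {
	struct i915_timeline *timeline;	/* holds a reference */
};

int main(void)
{
	/* Mirrors intel_init_ring_buffer(): create, hand to ring, drop ours. */
	struct i915_timeline *tl = timeline_create("engine0");
	struct ring ring = { .timeline = timeline_get(tl) };

	timeline_put(tl);
	printf("%s: fence context %llu\n", ring.timeline->name,
	       (unsigned long long)ring.timeline->fence_context);
	timeline_put(ring.timeline);	/* intel_ring_free() equivalent */
	return 0;
}

The diff below does the same dance with kref_get()/kref_put(), and embeds
the engine's own i915_timeline directly in intel_engine_cs rather than
pointing into a shared execution timeline.
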
 drivers/gpu/drm/i915/Makefile                      |   2 +-
 drivers/gpu/drm/i915/i915_debugfs.c                |   2 +-
 drivers/gpu/drm/i915/i915_drv.h                    |   4 +-
 drivers/gpu/drm/i915/i915_gem.c                    | 113 +++++-------
 drivers/gpu/drm/i915/i915_gem_context.c            |  40 ++---
 drivers/gpu/drm/i915/i915_gem_context.h            |   2 -
 drivers/gpu/drm/i915/i915_gem_gtt.h                |   2 +-
 drivers/gpu/drm/i915/i915_gem_request.c            |  77 ++++----
 drivers/gpu/drm/i915/i915_gem_request.h            |   3 +-
 drivers/gpu/drm/i915/i915_gem_timeline.c           | 198 ---------------------
 drivers/gpu/drm/i915/i915_gpu_error.c              |   4 +-
 drivers/gpu/drm/i915/i915_perf.c                   |  10 +-
 drivers/gpu/drm/i915/i915_timeline.c               | 122 +++++++++++++
 .../i915/{i915_gem_timeline.h => i915_timeline.h}  |  67 +++----
 drivers/gpu/drm/i915/intel_engine_cs.c             |  30 ++--
 drivers/gpu/drm/i915/intel_guc_submission.c        |   4 +-
 drivers/gpu/drm/i915/intel_lrc.c                   |  48 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c            |  23 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h            |  11 +-
 drivers/gpu/drm/i915/selftests/i915_gem_context.c  |   5 +-
 .../{i915_gem_timeline.c => i915_timeline.c}       |  79 ++++----
 drivers/gpu/drm/i915/selftests/mock_context.c      |   3 +-
 drivers/gpu/drm/i915/selftests/mock_engine.c       |   3 +-
 drivers/gpu/drm/i915/selftests/mock_gem_device.c   |  12 +-
 drivers/gpu/drm/i915/selftests/mock_timeline.c     |  45 -----
 drivers/gpu/drm/i915/selftests/mock_timeline.h     |  33 ----
 26 files changed, 371 insertions(+), 571 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_timeline.c
 create mode 100644 drivers/gpu/drm/i915/i915_timeline.c
 rename drivers/gpu/drm/i915/{i915_gem_timeline.h => i915_timeline.h} (69%)
 rename drivers/gpu/drm/i915/selftests/{i915_gem_timeline.c => i915_timeline.c} (84%)
 delete mode 100644 drivers/gpu/drm/i915/selftests/mock_timeline.c
 delete mode 100644 drivers/gpu/drm/i915/selftests/mock_timeline.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3bddd8a06806..987b116b825f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -66,9 +66,9 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
-	  i915_gem_timeline.o \
 	  i915_gem_userptr.o \
 	  i915_gemfs.o \
+	  i915_timeline.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_breadcrumbs.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8f6754d54093..45f7b4a500b5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1294,7 +1294,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
 			   engine->hangcheck.seqno, seqno[id],
 			   intel_engine_last_submit(engine),
-			   engine->timeline->inflight_seqnos);
+			   engine->timeline.inflight_seqnos);
 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
 			   yesno(intel_engine_has_waiter(engine)),
 			   yesno(test_bit(engine->id,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fb751ce488a0..61879f157159 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,8 +72,8 @@
 #include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_request.h"
-#include "i915_gem_timeline.h"
 
+#include "i915_timeline.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -2300,8 +2300,6 @@ struct drm_i915_private {
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
 		struct list_head timelines;
-		struct i915_gem_timeline execution_timeline;
-		struct i915_gem_timeline legacy_timeline;
 		u32 active_requests;
 
 		/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 604f2dd09300..87fb216c321c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2872,8 +2872,8 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
-	list_for_each_entry(request, &engine->timeline->requests, link) {
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+	list_for_each_entry(request, &engine->timeline.requests, link) {
 		if (__i915_gem_request_completed(request,
 						 request->global_seqno))
 			continue;
@@ -2885,7 +2885,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 	return active;
 }
@@ -2992,15 +2992,15 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_context *hung_ctx = request->ctx;
-	struct intel_timeline *timeline = request->timeline;
+	struct i915_timeline *timeline = request->timeline;
 	unsigned long flags;
 
-	GEM_BUG_ON(timeline == engine->timeline);
+	GEM_BUG_ON(timeline == &engine->timeline);
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 	spin_lock(&timeline->lock);
 
-	list_for_each_entry_continue(request, &engine->timeline->requests, link)
+	list_for_each_entry_continue(request, &engine->timeline.requests, link)
 		if (request->ctx == hung_ctx)
 			i915_gem_request_cancel(request);
 
@@ -3008,7 +3008,7 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
 		i915_gem_request_cancel(request);
 
 	spin_unlock(&timeline->lock);
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 /* Returns the request if it was guilty of the hang */
@@ -3056,11 +3056,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 			dma_fence_set_error(&request->fence, -EAGAIN);
 
 			/* Rewind the engine to replay the incomplete rq */
-			spin_lock_irq(&engine->timeline->lock);
+			spin_lock_irq(&engine->timeline.lock);
 			request = list_prev_entry(request, link);
-			if (&request->link == &engine->timeline->requests)
+			if (&request->link == &engine->timeline.requests)
 				request = NULL;
-			spin_unlock_irq(&engine->timeline->lock);
+			spin_unlock_irq(&engine->timeline.lock);
 		}
 	}
 
@@ -3170,10 +3170,10 @@ static void nop_complete_submit_request(struct drm_i915_gem_request *request)
 
 	dma_fence_set_error(&request->fence, -EIO);
 
-	spin_lock_irqsave(&request->engine->timeline->lock, flags);
+	spin_lock_irqsave(&request->engine->timeline.lock, flags);
 	__i915_gem_request_submit(request);
 	intel_engine_init_global_seqno(request->engine, request->global_seqno);
-	spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
 }
 
 void i915_gem_set_wedged(struct drm_i915_private *i915)
@@ -3231,10 +3231,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * (lockless) lookup doesn't try and wait upon the request as we
 		 * reset it.
 		 */
-		spin_lock_irqsave(&engine->timeline->lock, flags);
+		spin_lock_irqsave(&engine->timeline.lock, flags);
 		intel_engine_init_global_seqno(engine,
 					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+		spin_unlock_irqrestore(&engine->timeline.lock, flags);
 	}
 
 	set_bit(I915_WEDGED, &i915->gpu_error.flags);
@@ -3243,8 +3243,7 @@
 
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
-	struct i915_gem_timeline *tl;
-	int i;
+	struct i915_timeline *tl;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
@@ -3260,28 +3259,26 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	 * No more can be submitted until we reset the wedged bit.
 	 */
 	list_for_each_entry(tl, &i915->gt.timelines, link) {
-		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-			struct drm_i915_gem_request *rq;
+		struct drm_i915_gem_request *rq;
 
-			rq = i915_gem_active_peek(&tl->engine[i].last_request,
-						  &i915->drm.struct_mutex);
-			if (!rq)
-				continue;
+		rq = i915_gem_active_peek(&tl->last_request,
+					  &i915->drm.struct_mutex);
+		if (!rq)
+			continue;
 
-			/* We can't use our normal waiter as we want to
-			 * avoid recursively trying to handle the current
-			 * reset. The basic dma_fence_default_wait() installs
-			 * a callback for dma_fence_signal(), which is
-			 * triggered by our nop handler (indirectly, the
-			 * callback enables the signaler thread which is
-			 * woken by the nop_submit_request() advancing the seqno
-			 * and when the seqno passes the fence, the signaler
-			 * then signals the fence waking us up).
-			 */
-			if (dma_fence_default_wait(&rq->fence, true,
-						   MAX_SCHEDULE_TIMEOUT) < 0)
-				return false;
-		}
+		/* We can't use our normal waiter as we want to
+		 * avoid recursively trying to handle the current
+		 * reset. The basic dma_fence_default_wait() installs
+		 * a callback for dma_fence_signal(), which is
+		 * triggered by our nop handler (indirectly, the
+		 * callback enables the signaler thread which is
+		 * woken by the nop_submit_request() advancing the seqno
+		 * and when the seqno passes the fence, the signaler
+		 * then signals the fence waking us up).
+		 */
+		if (dma_fence_default_wait(&rq->fence, true,
+					   MAX_SCHEDULE_TIMEOUT) < 0)
+			return false;
 	}
 
 	/* Undo nop_submit_request. We prevent all new i915 requests from
@@ -3447,7 +3444,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	synchronize_irq(dev_priv->drm.irq);
 
 	intel_engines_park(dev_priv);
-	i915_gem_timelines_park(dev_priv);
+	i915_timelines_park(dev_priv);
 
 	i915_pmu_gt_parked(dev_priv);
 
@@ -3608,19 +3605,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	return ret;
 }
 
-static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
-{
-	int ret, i;
-
-	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static int wait_for_engines(struct drm_i915_private *i915)
 {
 	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -3654,12 +3638,12 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 		return 0;
 
 	if (flags & I915_WAIT_LOCKED) {
-		struct i915_gem_timeline *tl;
+		struct i915_timeline *tl;
 
 		lockdep_assert_held(&i915->drm.struct_mutex);
 
 		list_for_each_entry(tl, &i915->gt.timelines, link) {
-			ret = wait_for_timeline(tl, flags);
+			ret = i915_gem_active_wait(&tl->last_request, flags);
 			if (ret)
 				return ret;
 		}
@@ -3667,7 +3651,14 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 
 		ret = wait_for_engines(i915);
 	} else {
-		ret = wait_for_timeline(&i915->gt.execution_timeline, flags);
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+
+		for_each_engine(engine, i915, id) {
+			ret = i915_gem_active_wait(&engine->timeline.last_request, flags);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return ret;
@@ -4849,7 +4840,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
+		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
 		GEM_BUG_ON(engine->last_retired_context != kernel_context);
 	}
 }
@@ -5480,12 +5471,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->priorities)
 		goto err_dependencies;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
-	err = i915_gem_timeline_init__global(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	if (err)
-		goto err_priorities;
 
 	i915_gem_init__mm(dev_priv);
 
@@ -5506,8 +5492,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 
 	return 0;
 
-err_priorities:
-	kmem_cache_destroy(dev_priv->priorities);
 err_dependencies:
 	kmem_cache_destroy(dev_priv->dependencies);
 err_requests:
@@ -5527,12 +5511,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
 	i915_gem_drain_freed_objects(dev_priv);
 	WARN_ON(!llist_empty(&dev_priv->mm.free_list));
 	WARN_ON(dev_priv->mm.object_count);
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_timeline_fini(&dev_priv->gt.legacy_timeline);
-	i915_gem_timeline_fini(&dev_priv->gt.execution_timeline);
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	kmem_cache_destroy(dev_priv->priorities);
 	kmem_cache_destroy(dev_priv->dependencies);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8f7ea2b34a0d..db4256e3f21c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -123,7 +123,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
-	i915_gem_timeline_free(ctx->timeline);
 	i915_ppgtt_put(ctx->ppgtt);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -318,8 +317,7 @@ static void __destroy_hw_context(struct i915_gem_context *ctx,
 	context_close(ctx);
 }
 
-#define CREATE_TIMELINE BIT(0)
-#define CREATE_VM BIT(1)
+#define CREATE_VM BIT(0)
 
 /**
  * The default context needs to exist per ring that uses contexts. It stores the
@@ -357,18 +355,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
 		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
 	}
 
-	if (flags & CREATE_TIMELINE && HAS_EXECLISTS(dev_priv)) {
-		struct i915_gem_timeline *timeline;
-
-		timeline = i915_gem_timeline_create(dev_priv, ctx->name);
-		if (IS_ERR(timeline)) {
-			__destroy_hw_context(ctx, file_priv);
-			return ERR_CAST(timeline);
-		}
-
-		ctx->timeline = timeline;
-	}
-
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -433,7 +419,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	struct i915_gem_context *ctx;
 	int err;
 
-	ctx = i915_gem_create_context(i915, NULL, CREATE_VM | CREATE_TIMELINE);
+	ctx = i915_gem_create_context(i915, NULL, CREATE_VM);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -550,8 +536,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	idr_init(&file_priv->context_idr);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	ctx = i915_gem_create_context(i915, file_priv,
-				      CREATE_VM | CREATE_TIMELINE);
+	ctx = i915_gem_create_context(i915, file_priv, CREATE_VM);
 	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		idr_destroy(&file_priv->context_idr);
@@ -575,16 +560,13 @@ void i915_gem_context_close(struct drm_file *file)
 
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
-	struct i915_gem_timeline *timeline;
+	struct i915_timeline *timeline;
 
 	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		struct intel_timeline *tl;
-
-		if (timeline == &engine->i915->gt.execution_timeline)
+		if (timeline == &engine->timeline)
 			continue;
 
-		tl = &timeline->engine[engine->id];
-		if (i915_gem_active_peek(&tl->last_request,
+		if (i915_gem_active_peek(&timeline->last_request,
 					 &engine->i915->drm.struct_mutex))
 			return false;
 	}
@@ -595,7 +577,7 @@ static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
-	struct i915_gem_timeline *timeline;
+	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -615,12 +597,10 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		/* Queue this switch after all other activity */
 		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
 			struct drm_i915_gem_request *prev;
-			struct intel_timeline *tl;
 
-			tl = &timeline->engine[engine->id];
-			prev = i915_gem_active_raw(&tl->last_request,
+			prev = i915_gem_active_raw(&timeline->last_request,
 						   &dev_priv->drm.struct_mutex);
-			if (prev)
+			if (prev && prev->engine == engine)
 				i915_sw_fence_await_sw_fence_gfp(&req->submit,
 								 &prev->submit,
 								 I915_FENCE_GFP);
@@ -652,7 +632,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *share = NULL;
 	struct i915_gem_context *ctx;
-	unsigned int flags = CREATE_VM | CREATE_TIMELINE;
+	unsigned int flags = CREATE_VM;
 	int err;
 
 	if (!dev_priv->engine[RCS]->context_size)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 18386511144b..e4c3d559de63 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -55,8 +55,6 @@ struct i915_gem_context {
 	/** file_priv: owning file descriptor */
 	struct drm_i915_file_private *file_priv;
 
-	struct i915_gem_timeline *timeline;
-
 	/**
 	 * @ppgtt: unique address space (GTT)
 	 *
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e39d8e96e928..9a502947bc6f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -38,9 +38,9 @@
 #include <linux/mm.h>
 #include <linux/pagevec.h>
 
-#include "i915_gem_timeline.h"
 #include "i915_gem_request.h"
 #include "i915_selftest.h"
+#include "i915_timeline.h"
 
 #define I915_GTT_PAGE_SIZE_4K BIT(12)
 #define I915_GTT_PAGE_SIZE_64K BIT(16)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8f99f78219f1..b09add79ad7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -151,7 +151,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->timeline->common->name;
+	return to_request(fence)->timeline->name;
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -307,6 +307,7 @@ i915_priotree_init(struct i915_priotree *pt)
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
 	struct intel_engine_cs *engine;
+	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 	int ret;
 
@@ -319,10 +320,7 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 	for_each_engine(engine, i915, id) {
-		struct i915_gem_timeline *timeline;
-		struct intel_timeline *tl = engine->timeline;
-
-		if (!i915_seqno_passed(seqno, tl->seqno)) {
+		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
 			/* spin until threads are complete */
 			while (intel_breadcrumbs_busy(engine))
 				cond_resched();
@@ -330,17 +328,16 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 
 		/* Check we are idle before we fiddle with hw state! */
 		GEM_BUG_ON(!intel_engine_is_idle(engine));
-		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
 
 		/* Finally reset hw state */
 		intel_engine_init_global_seqno(engine, seqno);
-		tl->seqno = seqno;
-
-		list_for_each_entry(timeline, &i915->gt.timelines, link)
-			memset(timeline->engine[id].global_sync, 0,
-			       sizeof(timeline->engine[id].global_sync));
+		engine->timeline.seqno = seqno;
 	}
 
+	list_for_each_entry(timeline, &i915->gt.timelines, link)
+		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
+
 	return 0;
 }
 
@@ -403,15 +400,15 @@ static void mark_busy(struct drm_i915_private *i915)
 static int reserve_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	u32 active = ++engine->timeline->inflight_seqnos;
-	u32 seqno = engine->timeline->seqno;
+	u32 active = ++engine->timeline.inflight_seqnos;
+	u32 seqno = engine->timeline.seqno;
 	int ret;
 
 	/* Reservation is fine until we need to wrap around */
 	if (unlikely(add_overflows(seqno, active))) {
 		ret = reset_all_global_seqno(i915, 0);
 		if (ret) {
-			engine->timeline->inflight_seqnos--;
+			engine->timeline.inflight_seqnos--;
 			return ret;
 		}
 	}
@@ -434,8 +431,8 @@ static void unreserve_engine(struct intel_engine_cs *engine)
 				 msecs_to_jiffies(100));
 	}
 
-	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
-	engine->timeline->inflight_seqnos--;
+	GEM_BUG_ON(!engine->timeline.inflight_seqnos);
+	engine->timeline.inflight_seqnos--;
 }
 
 void i915_gem_retire_noop(struct i915_gem_active *active,
@@ -500,9 +497,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	debug_request_set_state(request,
 				DEBUG_REQUEST_EXECUTING, DEBUG_REQUEST_RETIRED);
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	list_del_init(&request->link);
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	unreserve_engine(request->engine);
 	advance_ring(request);
@@ -577,14 +574,14 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 		return;
 
 	do {
-		tmp = list_first_entry(&engine->timeline->requests,
+		tmp = list_first_entry(&engine->timeline.requests,
 				       typeof(*tmp), link);
 
 		i915_gem_request_retire(tmp);
 	} while (tmp != req);
 }
 
-static u32 timeline_get_seqno(struct intel_timeline *tl)
+static u32 timeline_get_seqno(struct i915_timeline *tl)
 {
 	return ++tl->seqno;
 }
@@ -592,18 +589,18 @@ static u32 timeline_get_seqno(struct intel_timeline *tl)
 void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 	u32 seqno;
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	trace_i915_gem_request_execute(request);
 	debug_request_set_state(request,
 				DEBUG_REQUEST_READY, DEBUG_REQUEST_EXECUTING);
 
 	/* Transfer from per-context onto the global per-engine timeline */
-	timeline = engine->timeline;
+	timeline = &engine->timeline;
 	GEM_BUG_ON(timeline == request->timeline);
 	GEM_BUG_ON(request->global_seqno);
 
@@ -634,20 +631,20 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	__i915_gem_request_submit(request);
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	debug_request_set_state(request,
 				DEBUG_REQUEST_EXECUTING, DEBUG_REQUEST_READY);
@@ -656,10 +653,10 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 	 * is kept in seqno/ring order.
 	 */
 	GEM_BUG_ON(!request->global_seqno);
-	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
 	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
 				     request->global_seqno));
-	engine->timeline->seqno--;
+	engine->timeline.seqno--;
 
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -670,7 +667,7 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 
 	/* Transfer back from the global per-engine timeline to per-context */
 	timeline = request->timeline;
-	GEM_BUG_ON(timeline == engine->timeline);
+	GEM_BUG_ON(timeline == &engine->timeline);
 
 	spin_lock(&timeline->lock);
 	list_move(&request->link, &timeline->requests);
@@ -690,11 +687,11 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	__i915_gem_request_unsubmit(request);
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static int __i915_sw_fence_call
@@ -780,7 +777,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		goto err_unreserve;
 
 	/* Move the oldest request to the slab-cache (if not in use!) */
-	req = list_first_entry_or_null(&engine->timeline->requests,
+	req = list_first_entry_or_null(&engine->timeline.requests,
 				       typeof(*req), link);
 	if (req && i915_gem_request_completed(req))
 		i915_gem_request_retire(req);
@@ -844,7 +841,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	debug_request_init(req);
 
 	req->timeline = ring->timeline;
-	GEM_BUG_ON(req->timeline == engine->timeline);
+	GEM_BUG_ON(req->timeline == &engine->timeline);
 
 	spin_lock_init(&req->lock);
 	dma_fence_init(&req->fence,
@@ -1013,7 +1010,7 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
 		/* Squash repeated waits to the same timelines */
 		if (fence->context != req->i915->mm.unordered_timeline &&
-		    intel_timeline_sync_is_later(req->timeline, fence))
+		    i915_timeline_sync_is_later(req->timeline, fence))
 			continue;
 
 		if (dma_fence_is_i915(fence))
@@ -1028,7 +1025,7 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
 		/* Record the latest fence used against each timeline */
 		if (fence->context != req->i915->mm.unordered_timeline)
-			intel_timeline_sync_set(req->timeline, fence);
+			i915_timeline_sync_set(req->timeline, fence);
 	} while (--nchild);
 
 	return 0;
@@ -1124,7 +1121,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_ring *ring = request->ring;
-	struct intel_timeline *timeline = request->timeline;
+	struct i915_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
 	u32 *cs;
 	int err;
@@ -1518,15 +1515,15 @@ static void engine_retire_requests(struct intel_engine_cs *engine)
 	u32 seqno = intel_engine_get_seqno(engine);
 	LIST_HEAD(retire);
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	list_for_each_entry_safe(request, next,
-				 &engine->timeline->requests, link) {
+				 &engine->timeline.requests, link) {
 		if (!i915_seqno_passed(seqno, request->global_seqno))
 			break;
 
 		list_move_tail(&request->link, &retire);
 	}
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	list_for_each_entry_safe(request, next, &retire, link)
 		i915_gem_request_retire(request);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index bbe2ce253f81..c4b0ed859823 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -35,6 +35,7 @@
 struct drm_file;
 struct drm_i915_gem_object;
 struct drm_i915_gem_request;
+struct i915_timeline;
 
 struct intel_wait {
 	struct rb_node node;
@@ -132,7 +133,7 @@ struct drm_i915_gem_request {
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
 	struct intel_ring *ring;
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct intel_signal_node signaling;
 
 	/* Fences for the various phases in the request's lifetime.
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
deleted file mode 100644
index 1bf48bdb78c4..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "i915_syncmap.h"
-
-static void __intel_timeline_init(struct intel_timeline *tl,
-				  struct i915_gem_timeline *parent,
-				  u64 context,
-				  struct lock_class_key *lockclass,
-				  const char *lockname)
-{
-	tl->fence_context = context;
-	tl->common = parent;
-	spin_lock_init(&tl->lock);
-	lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
-	init_request_active(&tl->last_request, NULL);
-	INIT_LIST_HEAD(&tl->requests);
-	i915_syncmap_init(&tl->sync);
-}
-
-static void __intel_timeline_fini(struct intel_timeline *tl)
-{
-	GEM_BUG_ON(!list_empty(&tl->requests));
-
-	i915_syncmap_free(&tl->sync);
-}
-
-static int __i915_gem_timeline_init(struct drm_i915_private *i915,
-				    struct i915_gem_timeline *timeline,
-				    const char *name,
-				    struct lock_class_key *lockclass,
-				    const char *lockname)
-{
-	unsigned int i;
-	u64 fences;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	/*
-	 * Ideally we want a set of engines on a single leaf as we expect
-	 * to mostly be tracking synchronisation between engines. It is not
-	 * a huge issue if this is not the case, but we may want to mitigate
-	 * any page crossing penalties if they become an issue.
-	 */
-	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
-
-	timeline->i915 = i915;
-	timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
-	if (!timeline->name)
-		return -ENOMEM;
-
-	list_add(&timeline->link, &i915->gt.timelines);
-
-	/* Called during early_init before we know how many engines there are */
-	fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
-	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
-		__intel_timeline_init(&timeline->engine[i],
-				      timeline, fences++,
-				      lockclass, lockname);
-
-	return 0;
-}
-
-int i915_gem_timeline_init(struct drm_i915_private *i915,
-			   struct i915_gem_timeline *timeline,
-			   const char *name)
-{
-	static struct lock_class_key class;
-
-	return __i915_gem_timeline_init(i915, timeline, name,
-					&class, "&timeline->lock");
-}
-
-int i915_gem_timeline_init__global(struct drm_i915_private *i915)
-{
-	static struct lock_class_key class1, class2;
-	int err;
-
-	err = __i915_gem_timeline_init(i915,
-				       &i915->gt.execution_timeline,
-				       "[execution]", &class1,
-				       "i915_execution_timeline");
-	if (err)
-		return err;
-
-	err = __i915_gem_timeline_init(i915,
-				     &i915->gt.legacy_timeline,
-				     "[global]", &class2,
-				     "i915_global_timeline");
-	if (err)
-		goto err_exec_timeline;
-
-	return 0;
-
-err_exec_timeline:
-	i915_gem_timeline_fini(&i915->gt.execution_timeline);
-	return err;
-}
-
-/**
- * i915_gem_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_gem_timelines_park(struct drm_i915_private *i915)
-{
-	struct i915_gem_timeline *timeline;
-	int i;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link) {
-		for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
-			struct intel_timeline *tl = &timeline->engine[i];
-
-			/*
-			 * All known fences are completed so we can scrap
-			 * the current sync point tracking and start afresh,
-			 * any attempt to wait upon a previous sync point
-			 * will be skipped as the fence was signaled.
-			 */
-			i915_syncmap_free(&tl->sync);
-		}
-	}
-}
-
-void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
-{
-	int i;
-
-	lockdep_assert_held(&timeline->i915->drm.struct_mutex);
-
-	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
-		__intel_timeline_fini(&timeline->engine[i]);
-
-	list_del(&timeline->link);
-	kfree(timeline->name);
-}
-
-struct i915_gem_timeline *
-i915_gem_timeline_create(struct drm_i915_private *i915, const char *name)
-{
-	struct i915_gem_timeline *timeline;
-	int err;
-
-	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
-	if (!timeline)
-		return ERR_PTR(-ENOMEM);
-
-	err = i915_gem_timeline_init(i915, timeline, name);
-	if (err) {
-		kfree(timeline);
-		return ERR_PTR(err);
-	}
-
-	return timeline;
-}
-
-void i915_gem_timeline_free(struct i915_gem_timeline *timeline)
-{
-	if (!timeline)
-		return;
-
-	i915_gem_timeline_fini(timeline);
-	kfree(timeline);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_gem_timeline.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0ed2e679d739..204fe044bb58 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1302,7 +1302,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline->requests, link)
+	list_for_each_entry_from(request, &engine->timeline.requests, link)
 		count++;
 	if (!count)
 		return;
@@ -1315,7 +1315,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline->requests, link) {
+	list_for_each_entry_from(request, &engine->timeline.requests, link) {
 		if (count >= ee->num_requests) {
 			/*
 			 * If the ring request list was changed in
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index ae60a7d416ed..ef926e645fb9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1680,7 +1680,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 						 const struct i915_oa_config *oa_config)
 {
 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
-	struct i915_gem_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -1701,15 +1701,11 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 	/* Queue this switch after all other activity */
 	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
 		struct drm_i915_gem_request *prev;
-		struct intel_timeline *tl;
 
-		tl = &timeline->engine[engine->id];
-		prev = i915_gem_active_raw(&tl->last_request,
+		prev = i915_gem_active_raw(&timeline->last_request,
 					   &dev_priv->drm.struct_mutex);
 		if (prev)
-			i915_sw_fence_await_sw_fence_gfp(&req->submit,
-							 &prev->submit,
-							 GFP_KERNEL);
+			i915_gem_request_await_dma_fence(req, &prev->fence);
 	}
 
 	i915_add_request(req);
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
new file mode 100644
index 000000000000..9ae597d87097
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+#include "i915_timeline.h"
+#include "i915_syncmap.h"
+
+void i915_timeline_init(struct drm_i915_private *i915,
+			struct i915_timeline *timeline,
+			const char *name)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	/*
+	 * Ideally we want a set of engines on a single leaf as we expect
+	 * to mostly be tracking synchronisation between engines. It is not
+	 * a huge issue if this is not the case, but we may want to mitigate
+	 * any page crossing penalties if they become an issue.
+	 */
+	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+
+	timeline->name = name;
+
+	list_add(&timeline->link, &i915->gt.timelines);
+
+	/* Each timeline now allocates its own single fence context */
+
+	timeline->fence_context = dma_fence_context_alloc(1);
+
+	spin_lock_init(&timeline->lock);
+
+	init_request_active(&timeline->last_request, NULL);
+	INIT_LIST_HEAD(&timeline->requests);
+
+	i915_syncmap_init(&timeline->sync);
+}
+
+/**
+ * i915_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
+ *
+ * When the driver is completely idle, we know that all of our sync points
+ * have been signaled and our tracking is then entirely redundant. Any request
+ * to wait upon an older sync point will be completed instantly as we know
+ * the fence is signaled and therefore we will not even look them up in the
+ * sync point map.
+ */
+void i915_timelines_park(struct drm_i915_private *i915)
+{
+	struct i915_timeline *timeline;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	list_for_each_entry(timeline, &i915->gt.timelines, link) {
+		/*
+		 * All known fences are completed so we can scrap
+		 * the current sync point tracking and start afresh,
+		 * any attempt to wait upon a previous sync point
+		 * will be skipped as the fence was signaled.
+		 */
+		i915_syncmap_free(&timeline->sync);
+	}
+}
+
+void i915_timeline_fini(struct i915_timeline *timeline)
+{
+	GEM_BUG_ON(!list_empty(&timeline->requests));
+
+	i915_syncmap_free(&timeline->sync);
+
+	list_del(&timeline->link);
+}
+
+struct i915_timeline *
+i915_timeline_create(struct drm_i915_private *i915, const char *name)
+{
+	struct i915_timeline *timeline;
+
+	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+	if (!timeline)
+		return ERR_PTR(-ENOMEM);
+
+	i915_timeline_init(i915, timeline, name);
+	kref_init(&timeline->kref);
+
+	return timeline;
+}
+
+void __i915_timeline_free(struct kref *kref)
+{
+	struct i915_timeline *timeline =
+		container_of(kref, typeof(*timeline), kref);
+
+	i915_timeline_fini(timeline);
+	kfree(timeline);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
similarity index 69%
rename from drivers/gpu/drm/i915/i915_gem_timeline.h
rename to drivers/gpu/drm/i915/i915_timeline.h
index 7ecf0a253d78..4e5a7a619279 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -22,18 +22,17 @@
  *
  */
 
-#ifndef I915_GEM_TIMELINE_H
-#define I915_GEM_TIMELINE_H
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
 
 #include <linux/list.h>
+#include <linux/kref.h>
 
 #include "i915_utils.h"
 #include "i915_gem_request.h"
 #include "i915_syncmap.h"
 
-struct i915_gem_timeline;
-
-struct intel_timeline {
+struct i915_timeline {
 	u64 fence_context;
 	u32 seqno;
 
@@ -77,51 +76,57 @@ struct intel_timeline {
 	 */
 	u32 global_sync[I915_NUM_ENGINES];
 
-	struct i915_gem_timeline *common;
-};
-
-struct i915_gem_timeline {
 	struct list_head link;
-
-	struct drm_i915_private *i915;
 	const char *name;
 
-	struct intel_timeline engine[I915_NUM_ENGINES];
+	struct kref kref;
 };
 
-int i915_gem_timeline_init(struct drm_i915_private *i915,
-			   struct i915_gem_timeline *tl,
-			   const char *name);
-int i915_gem_timeline_init__global(struct drm_i915_private *i915);
-void i915_gem_timelines_park(struct drm_i915_private *i915);
-void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+void i915_timeline_init(struct drm_i915_private *i915,
+			struct i915_timeline *tl,
+			const char *name);
+void i915_timeline_fini(struct i915_timeline *tl);
 
-struct i915_gem_timeline *
-i915_gem_timeline_create(struct drm_i915_private *i915, const char *name);
-void i915_gem_timeline_free(struct i915_gem_timeline *timeline);
+struct i915_timeline *
+i915_timeline_create(struct drm_i915_private *i915, const char *name);
 
-static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
-					    u64 context, u32 seqno)
+static inline struct i915_timeline *
+i915_timeline_get(struct i915_timeline *timeline)
+{
+	kref_get(&timeline->kref);
+	return timeline;
+}
+
+void __i915_timeline_free(struct kref *kref);
+static inline void i915_timeline_put(struct i915_timeline *timeline)
+{
+	kref_put(&timeline->kref, __i915_timeline_free);
+}
+
+static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
+					   u64 context, u32 seqno)
 {
 	return i915_syncmap_set(&tl->sync, context, seqno);
 }
 
-static inline int intel_timeline_sync_set(struct intel_timeline *tl,
-					  const struct dma_fence *fence)
+static inline int i915_timeline_sync_set(struct i915_timeline *tl,
+					 const struct dma_fence *fence)
 {
-	return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+	return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
 }
 
-static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
-						  u64 context, u32 seqno)
+static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
+						 u64 context, u32 seqno)
 {
 	return i915_syncmap_is_later(&tl->sync, context, seqno);
 }
 
-static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
-						const struct dma_fence *fence)
+static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
+					       const struct dma_fence *fence)
 {
-	return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+	return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
 }
 
+void i915_timelines_park(struct drm_i915_private *i915);
+
 #endif
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 90edbe9286c1..e8440e7b3f18 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -391,12 +391,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
 }
 
-static void intel_engine_init_timeline(struct intel_engine_cs *engine)
-{
-	engine->timeline =
-		&engine->i915->gt.execution_timeline.engine[engine->id];
-}
-
 static bool csb_force_mmio(struct drm_i915_private *i915)
 {
 	/*
@@ -440,7 +434,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
 	intel_engine_init_execlist(engine);
 
-	intel_engine_init_timeline(engine);
+	i915_timeline_init(engine->i915, &engine->timeline, engine->name);
 	intel_engine_init_hangcheck(engine);
 	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 
@@ -689,6 +683,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->i915->preempt_context)
 		engine->context_unpin(engine, engine->i915->preempt_context);
 	engine->context_unpin(engine, engine->i915->kernel_context);
+
+	i915_timeline_fini(&engine->timeline);
 }
 
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
@@ -1563,7 +1559,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 	 * the last request that remains in the timeline. When idle, it is
 	 * the last executed context as tracked by retirement.
 	 */
-	rq = __i915_gem_active_peek(&engine->timeline->last_request);
+	rq = __i915_gem_active_peek(&engine->timeline.last_request);
 	if (rq)
 		return rq->ctx == kernel_context;
 	else
@@ -1675,7 +1671,7 @@ static void print_request(struct drm_printer *m,
 		   rq->ctx->hw_id, rq->fence.seqno,
 		   rq->priotree.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
-		   rq->timeline->common->name);
+		   rq->timeline->name);
 }
 
 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
@@ -1736,7 +1732,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		   intel_engine_last_submit(engine),
 		   engine->hangcheck.seqno,
 		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-		   engine->timeline->inflight_seqnos);
+		   engine->timeline.inflight_seqnos);
 	drm_printf(m, "\tReset count: %d (global %d)\n",
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));
@@ -1745,14 +1741,14 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	rq = list_first_entry(&engine->timeline->requests,
+	rq = list_first_entry(&engine->timeline.requests,
 			      struct drm_i915_gem_request, link);
-	if (&rq->link != &engine->timeline->requests)
+	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tfirst  ");
 
-	rq = list_last_entry(&engine->timeline->requests,
+	rq = list_last_entry(&engine->timeline.requests,
 			     struct drm_i915_gem_request, link);
-	if (&rq->link != &engine->timeline->requests)
+	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tlast   ");
 
 	rq = i915_gem_find_active_request(engine);
@@ -1878,8 +1874,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 
-	spin_lock_irq(&engine->timeline->lock);
-	list_for_each_entry(rq, &engine->timeline->requests, link)
+	spin_lock_irq(&engine->timeline.lock);
+	list_for_each_entry(rq, &engine->timeline.requests, link)
 		print_request(m, rq, "\t\tE ");
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
 		struct i915_priolist *p =
@@ -1888,7 +1884,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		list_for_each_entry(rq, &p->requests, priotree.link)
 			print_request(m, rq, "\t\tQ ");
 	}
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	spin_lock_irq(&b->rb_lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index c727828e06d7..5833dc57f9c6 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -680,7 +680,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	rb = rb_first_cached(&execlists->queue);
 	if (!rb)
 		goto unlock;
@@ -744,7 +744,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 		guc_submit(engine);
 	}
 unlock:
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static void guc_submission_tasklet(unsigned long data)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e4be93d17eaf..4bdf95fc3253 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -276,10 +276,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	struct i915_priolist *uninitialized_var(p);
 	int last_prio = I915_PRIORITY_INVALID;
 
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->timeline->requests,
+					 &engine->timeline.requests,
 					 link) {
 		if (i915_gem_request_completed(rq))
 			return;
@@ -307,9 +307,9 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static inline void
@@ -491,7 +491,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	rb = rb_first_cached(&execlists->queue);
 	if (!rb)
 		goto unlock;
@@ -630,7 +630,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	if (submit)
 		port_assign(port, last);
 unlock:
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	if (submit) {
 		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
@@ -664,13 +664,13 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
 	execlists_cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->timeline->requests, link) {
+	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
 		if (!i915_gem_request_completed(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
@@ -706,7 +706,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	 */
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 /*
@@ -923,14 +923,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	insert_request(engine, &request->priotree, request->priotree.priority);
 
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->priotree.link));
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
@@ -946,8 +946,8 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 	GEM_BUG_ON(!locked);
 
 	if (engine != locked) {
-		spin_unlock(&locked->timeline->lock);
-		spin_lock(&engine->timeline->lock);
+		spin_unlock(&locked->timeline.lock);
+		spin_lock(&engine->timeline.lock);
 	}
 
 	return engine;
@@ -1031,7 +1031,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	}
 
 	engine = request->engine;
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@@ -1051,7 +1051,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 		}
 	}
 
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
@@ -1564,7 +1564,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 
 	reset_irq(engine);
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	/*
 	 * Catch up with any missed context-switch interrupts.
@@ -1580,7 +1580,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	/* Push back any incomplete requests for replay after the reset. */
 	__unwind_incomplete_requests(engine);
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 	/* Mark all CS interrupts as complete */
 	execlists->active = 0;
@@ -2300,6 +2300,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
+	struct i915_timeline *timeline;
 	int ret;
 
 	if (ce->state)
@@ -2315,8 +2316,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
 	if (IS_ERR(ctx_obj)) {
-		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-		return PTR_ERR(ctx_obj);
+		ret = PTR_ERR(ctx_obj);
+		return ret; /* not error_deref_obj: ctx_obj is an ERR_PTR here */
 	}
 
 	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
@@ -2325,7 +2326,14 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, ctx->timeline, ctx->ring_size);
+	timeline = i915_timeline_create(ctx->i915, ctx->name);
+	if (IS_ERR(timeline)) {
+		ret = PTR_ERR(timeline);
+		goto error_deref_obj;
+	}
+
+	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03442d6e84a2..978098eb6516 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -693,17 +693,17 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	struct drm_i915_gem_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline->requests, link) {
+	list_for_each_entry(request, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!request->global_seqno);
 		if (!i915_gem_request_completed(request))
 			dma_fence_set_error(&request->fence, -EIO);
 	}
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
@@ -1125,7 +1125,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_gem_timeline *timeline,
+			 struct i915_timeline *timeline,
 			 int size)
 {
 	struct intel_ring *ring;
@@ -1139,7 +1139,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->timeline = &timeline->engine[engine->id];
+	ring->timeline = i915_timeline_get(timeline);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1170,6 +1170,7 @@ intel_ring_free(struct intel_ring *ring)
 	i915_vma_close(ring->vma);
 	__i915_gem_object_release_unless_active(obj);
 
+	i915_timeline_put(ring->timeline);
 	kfree(ring);
 }
 
@@ -1328,6 +1329,7 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct intel_ring *ring;
+	struct i915_timeline *timeline;
 	int err;
 
 	intel_engine_setup_common(engine);
@@ -1336,9 +1338,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (err)
 		goto err;
 
-	ring = intel_engine_create_ring(engine,
-					&engine->i915->gt.legacy_timeline,
-					32 * PAGE_SIZE);
+	timeline = i915_timeline_create(engine->i915, engine->name);
+	if (IS_ERR(timeline)) {
+		err = PTR_ERR(timeline);
+		goto err;
+	}
+
+	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
 		goto err;
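
Note the design choice in the two conversions above: rather than every engine
sharing a slot of the single i915->gt.legacy_timeline, each legacy engine now
owns a private timeline named after itself, and the reference taken inside
intel_engine_create_ring() is balanced by the put in intel_ring_free(). A
hedged sketch of that lifetime pairing (error handling elided; assumes a valid
timeline tl created as above):

	struct intel_ring *ring;

	ring = intel_engine_create_ring(engine, tl, 32 * PAGE_SIZE);
	if (!IS_ERR(ring)) {
		/* ring->timeline is valid for the ring's whole lifetime */
		intel_ring_free(ring); /* puts ring->timeline */
	}
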
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fa4ec6d1e638..e29b128db64d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,9 +5,9 @@
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_request.h"
-#include "i915_gem_timeline.h"
 #include "i915_pmu.h"
 #include "i915_selftest.h"
+#include "i915_timeline.h"
 
 struct drm_printer;
 
@@ -123,7 +123,7 @@ struct intel_ring {
 	struct i915_vma *vma;
 	void *vaddr;
 
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct list_head request_list;
 
 	u32 head;
@@ -302,7 +302,8 @@ struct intel_engine_cs {
 	unsigned int irq_shift;
 
 	struct intel_ring *buffer;
-	struct intel_timeline *timeline;
+
+	struct i915_timeline timeline;
 
 	struct drm_i915_gem_object *default_state;
 
@@ -723,7 +724,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_gem_timeline *timeline,
+			 struct i915_timeline *timeline,
 			 int size);
 int intel_ring_pin(struct intel_ring *ring,
 		   struct drm_i915_private *i915,
@@ -846,7 +847,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 	 * with serialising this hint with anything, so document it as
 	 * a hint and nothing more.
 	 */
-	return READ_ONCE(engine->timeline->seqno);
+	return READ_ONCE(engine->timeline.seqno);
 }
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
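
Because struct intel_engine_cs now embeds its i915_timeline by value, every
access in this series changes from pointer chasing (engine->timeline->lock) to
member access (engine->timeline.lock). A minimal sketch of the resulting
idiom, modelled on the cancel_requests() conversion earlier in this patch; the
helper name is hypothetical and the completion check is omitted for brevity:

	static void example_mark_inflight_eio(struct intel_engine_cs *engine)
	{
		struct drm_i915_gem_request *rq;
		unsigned long flags;

		/* the per-engine timeline lock now lives inside the engine */
		spin_lock_irqsave(&engine->timeline.lock, flags);

		/* submitted requests queue on the embedded timeline */
		list_for_each_entry(rq, &engine->timeline.requests, link)
			dma_fence_set_error(&rq->fence, -EIO);

		spin_unlock_irqrestore(&engine->timeline.lock, flags);
	}
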
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 2058c5197689..3d68e721640d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -370,8 +370,7 @@ static int igt_ctx_exec(void *arg)
 			} else {
 				ctx = i915_gem_create_context(i915,
 							      file->driver_priv,
-							      CREATE_VM |
-							      CREATE_TIMELINE);
+							      CREATE_VM);
 			}
 			if (IS_ERR(ctx)) {
 				err = PTR_ERR(ctx);
@@ -464,7 +463,7 @@ static int igt_shared_ctx_exec(void *arg)
 		mutex_lock(&i915->drm.struct_mutex);
 
 		parent = i915_gem_create_context(i915, file->driver_priv,
-						 CREATE_VM | CREATE_TIMELINE);
+						 CREATE_VM);
 		if (IS_ERR(parent)) {
 			err = PTR_ERR(parent);
 			goto out_unlock;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
similarity index 84%
rename from drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
rename to drivers/gpu/drm/i915/selftests/i915_timeline.c
index 3000e6a7d82d..92d319374e18 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -26,7 +26,6 @@
 #include "i915_random.h"
 
 #include "mock_gem_device.h"
-#include "mock_timeline.h"
 
 struct __igt_sync {
 	const char *name;
@@ -35,21 +34,21 @@ struct __igt_sync {
 	bool set;
 };
 
-static int __igt_sync(struct intel_timeline *tl,
+static int __igt_sync(struct i915_timeline *tl,
 		      u64 ctx,
 		      const struct __igt_sync *p,
 		      const char *name)
 {
 	int ret;
 
-	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+	if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
 		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
 		       name, p->name, ctx, p->seqno, yesno(p->expected));
 		return -EINVAL;
 	}
 
 	if (p->set) {
-		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+		ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
 		if (ret)
 			return ret;
 	}
@@ -77,13 +76,13 @@ static int igt_sync(void *arg)
 		{ "unwrap", UINT_MAX, true, false },
 		{},
 	}, *p;
-	struct intel_timeline *tl;
+	struct i915_timeline *tl;
 	int order, offset;
 	int ret = -ENODEV;
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	tl = i915_timeline_create(NULL, "mock");
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
 
 	for (p = pass; p->name; p++) {
 		for (order = 1; order < 64; order++) {
@@ -96,11 +95,11 @@ static int igt_sync(void *arg)
 			}
 		}
 	}
-	mock_timeline_destroy(tl);
+	i915_timeline_put(tl);
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	tl = i915_timeline_create(NULL, "mock");
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
 
 	for (order = 1; order < 64; order++) {
 		for (offset = -1; offset <= (order > 1); offset++) {
@@ -115,7 +114,7 @@ static int igt_sync(void *arg)
 	}
 
 out:
-	mock_timeline_destroy(tl);
+	i915_timeline_put(tl);
 	return ret;
 }
 
@@ -127,15 +126,15 @@ static unsigned int random_engine(struct rnd_state *rnd)
 static int bench_sync(void *arg)
 {
 	struct rnd_state prng;
-	struct intel_timeline *tl;
+	struct i915_timeline *tl;
 	unsigned long end_time, count;
 	u64 prng32_1M;
 	ktime_t kt;
 	int order, last_order;
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	tl = i915_timeline_create(NULL, "mock");
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
 
 	/* Lookups from cache are very fast and so the random number generation
 	 * and the loop itself becomes a significant factor in the per-iteration
@@ -167,7 +166,7 @@ static int bench_sync(void *arg)
 	do {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		__intel_timeline_sync_set(tl, id, 0);
+		__i915_timeline_sync_set(tl, id, 0);
 		count++;
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
@@ -182,8 +181,8 @@ static int bench_sync(void *arg)
 	while (end_time--) {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		if (!__intel_timeline_sync_is_later(tl, id, 0)) {
-			mock_timeline_destroy(tl);
+		if (!__i915_timeline_sync_is_later(tl, id, 0)) {
+			i915_timeline_put(tl);
 			pr_err("Lookup of %llu failed\n", id);
 			return -EINVAL;
 		}
@@ -193,19 +192,19 @@ static int bench_sync(void *arg)
 	pr_info("%s: %lu random lookups, %lluns/lookup\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
 
-	mock_timeline_destroy(tl);
+	i915_timeline_put(tl);
 	cond_resched();
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	tl = i915_timeline_create(NULL, "mock");
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
 
 	/* Benchmark setting the first N (in order) contexts */
 	count = 0;
 	kt = ktime_get();
 	end_time = jiffies + HZ/10;
 	do {
-		__intel_timeline_sync_set(tl, count++, 0);
+		__i915_timeline_sync_set(tl, count++, 0);
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
 	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -215,9 +214,9 @@ static int bench_sync(void *arg)
 	end_time = count;
 	kt = ktime_get();
 	while (end_time--) {
-		if (!__intel_timeline_sync_is_later(tl, end_time, 0)) {
+		if (!__i915_timeline_sync_is_later(tl, end_time, 0)) {
 			pr_err("Lookup of %lu failed\n", end_time);
-			mock_timeline_destroy(tl);
+			i915_timeline_put(tl);
 			return -EINVAL;
 		}
 	}
@@ -225,12 +224,12 @@ static int bench_sync(void *arg)
 	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
 
-	mock_timeline_destroy(tl);
+	i915_timeline_put(tl);
 	cond_resched();
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	tl = i915_timeline_create(NULL, "mock");
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
 
 	/* Benchmark searching for a random context id and maybe changing it */
 	prandom_seed_state(&prng, i915_selftest.random_seed);
@@ -241,8 +240,8 @@ static int bench_sync(void *arg)
 		u32 id = random_engine(&prng);
 		u32 seqno = prandom_u32_state(&prng);
 
-		if (!__intel_timeline_sync_is_later(tl, id, seqno))
-			__intel_timeline_sync_set(tl, id, seqno);
+		if (!__i915_timeline_sync_is_later(tl, id, seqno))
+			__i915_timeline_sync_set(tl, id, seqno);
 
 		count++;
 	} while (!time_after(jiffies, end_time));
@@ -250,7 +249,7 @@ static int bench_sync(void *arg)
 	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
 	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-	mock_timeline_destroy(tl);
+	i915_timeline_put(tl);
 	cond_resched();
 
 	/* Benchmark searching for a known context id and changing the seqno */
@@ -258,9 +257,9 @@ static int bench_sync(void *arg)
 	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
 		unsigned int mask = BIT(order) - 1;
 
-		tl = mock_timeline(0);
-		if (!tl)
-			return -ENOMEM;
+		tl = i915_timeline_create(NULL, "mock");
+		if (IS_ERR(tl))
+			return PTR_ERR(tl);
 
 		count = 0;
 		kt = ktime_get();
@@ -272,8 +271,8 @@ static int bench_sync(void *arg)
 			 */
 			u64 id = (u64)(count & mask) << order;
 
-			__intel_timeline_sync_is_later(tl, id, 0);
-			__intel_timeline_sync_set(tl, id, 0);
+			__i915_timeline_sync_is_later(tl, id, 0);
+			__i915_timeline_sync_set(tl, id, 0);
 
 			count++;
 		} while (!time_after(jiffies, end_time));
@@ -281,7 +280,7 @@ static int bench_sync(void *arg)
 		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
 			__func__, count, order,
 			(long long)div64_ul(ktime_to_ns(kt), count));
-		mock_timeline_destroy(tl);
+		i915_timeline_put(tl);
 		cond_resched();
 	}
 
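
The selftest conversions above work because i915_timeline_create() tolerates a
NULL device, so a real timeline can stand in for the bespoke mock: failure
reporting moves from NULL to ERR_PTR, and teardown becomes a reference drop. A
condensed sketch of the converted setup/teardown, mirroring the hunks above
(ctx and seqno are illustrative values):

	struct i915_timeline *tl;

	tl = i915_timeline_create(NULL, "mock"); /* no drm_i915_private needed */
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	if (!__i915_timeline_sync_is_later(tl, ctx, seqno))
		__i915_timeline_sync_set(tl, ctx, seqno);

	i915_timeline_put(tl); /* replaces mock_timeline_destroy() */
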
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b8977cf2279c..b18a955b5dd5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -88,8 +88,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	return i915_gem_create_context(i915, file->driver_priv,
-				       CREATE_VM | CREATE_TIMELINE);
+	return i915_gem_create_context(i915, file->driver_priv, CREATE_VM);
 }
 
 struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 19c0d662f351..0dc704929fb0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -174,8 +174,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
 	engine->base.submit_request = mock_submit_request;
 
-	intel_engine_init_timeline(&engine->base);
-
+	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
 	intel_engine_init_breadcrumbs(&engine->base);
 	engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 147a64b065f4..b405cafc193e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -72,10 +72,8 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_fini_ggtt(i915);
-	i915_gem_timeline_fini(&i915->gt.legacy_timeline);
-	i915_gem_timeline_fini(&i915->gt.execution_timeline);
-	WARN_ON(!list_empty(&i915->gt.timelines));
 	mutex_unlock(&i915->drm.struct_mutex);
+	WARN_ON(!list_empty(&i915->gt.timelines));
 
 	destroy_workqueue(i915->wq);
 
@@ -225,14 +223,8 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->priorities)
 		goto err_dependencies;
 
-	mutex_lock(&i915->drm.struct_mutex);
 	INIT_LIST_HEAD(&i915->gt.timelines);
-	err = i915_gem_timeline_init__global(i915);
-	if (err) {
-		mutex_unlock(&i915->drm.struct_mutex);
-		goto err_priorities;
-	}
-
+	mutex_lock(&i915->drm.struct_mutex);
 	mock_init_ggtt(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
deleted file mode 100644
index 47b1f47c5812..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_timeline.h"
-
-struct intel_timeline *mock_timeline(u64 context)
-{
-	static struct lock_class_key class;
-	struct intel_timeline *tl;
-
-	tl = kzalloc(sizeof(*tl), GFP_KERNEL);
-	if (!tl)
-		return NULL;
-
-	__intel_timeline_init(tl, NULL, context, &class, "mock");
-
-	return tl;
-}
-
-void mock_timeline_destroy(struct intel_timeline *tl)
-{
-	__intel_timeline_fini(tl);
-	kfree(tl);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
deleted file mode 100644
index c27ff4639b8b..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_TIMELINE__
-#define __MOCK_TIMELINE__
-
-#include "../i915_gem_timeline.h"
-
-struct intel_timeline *mock_timeline(u64 context);
-void mock_timeline_destroy(struct intel_timeline *tl);
-
-#endif /* !__MOCK_TIMELINE__ */
-- 
2.15.1


