[PATCH 01/17] drm/i915: Use 0 for the unordered context

Chris Wilson chris at chris-wilson.co.uk
Sun Aug 18 18:21:23 UTC 2019


Since commit 078dec3326e2 ("dma-buf: add dma_fence_get_stub"), fence
context 0 has been an impossible match, as it is reserved for the
always-signaled stub fence. We can therefore simplify our timeline
tracking by knowing that a context of 0 always means no match, and drop
the dedicated unordered_timeline context.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
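As an aside, here is a minimal userspace sketch of the idea, using
made-up fake_fence/sync_cache/await_fence stand-ins rather than the
real dma_fence/intel_timeline types: since context 0 is reserved for
the always-signaled stub fence, the await path can treat "context == 0"
as "unordered" and bypass the per-timeline dedup cache, instead of
comparing against a dedicated unordered_timeline value.

/*
 * Standalone sketch (not kernel code). The struct names and the sync
 * "cache" below are simplified stand-ins for the i915/dma-buf types.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_fence {
	uint64_t context;	/* 0 == unordered / stub */
	uint64_t seqno;
};

/* Toy stand-in for intel_timeline_sync_is_later()/_set(). */
struct sync_cache {
	uint64_t last_context;
	uint64_t last_seqno;
};

static bool sync_is_later(const struct sync_cache *c,
			  const struct fake_fence *f)
{
	return c->last_context == f->context && c->last_seqno >= f->seqno;
}

static void sync_set(struct sync_cache *c, const struct fake_fence *f)
{
	c->last_context = f->context;
	c->last_seqno = f->seqno;
}

/* Returns true if the wait can be squashed (already ordered after it). */
static bool await_fence(struct sync_cache *c, const struct fake_fence *f)
{
	/* Previously: f->context != i915->mm.unordered_timeline && ... */
	if (f->context && sync_is_later(c, f))
		return true;

	/* ... emit the actual wait here ... */

	/* Record the latest fence used against each (real) timeline. */
	if (f->context)
		sync_set(c, f);

	return false;
}

int main(void)
{
	struct sync_cache cache = {};
	struct fake_fence ordered = { .context = 3, .seqno = 1 };
	struct fake_fence unordered = { .context = 0, .seqno = 0 };

	printf("ordered squashed?  %d\n", await_fence(&cache, &ordered));
	printf("ordered again?     %d\n", await_fence(&cache, &ordered));
	printf("unordered fence?   %d\n", await_fence(&cache, &unordered));
	return 0;
}

The second call to await_fence() with the ordered fence is squashed,
while a fence carrying context 0 is never cached and never squashed,
which is exactly the behaviour the hunks in i915_request.c preserve.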
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c    | 3 +--
 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 7 +------
 drivers/gpu/drm/i915/gem/i915_gem_fence.c      | 3 +--
 drivers/gpu/drm/i915/i915_drv.h                | 2 --
 drivers/gpu/drm/i915/i915_gem.c                | 2 --
 drivers/gpu/drm/i915/i915_request.c            | 4 ++--
 6 files changed, 5 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 724242ec3972..fb0ef176ba5b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -134,8 +134,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 		dma_fence_init(&clflush->dma,
 			       &i915_clflush_ops,
 			       &clflush_lock,
-			       to_i915(obj->base.dev)->mm.unordered_timeline,
-			       0);
+			       0, 0);
 		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
 
 		clflush->obj = i915_gem_object_get(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 818ac6915bc5..f99920652751 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -267,7 +267,6 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 				     struct i915_page_sizes *page_sizes,
 				     u32 value)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct clear_pages_work *work;
 	struct i915_sleeve *sleeve;
 	int err;
@@ -290,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 
 	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
 
-	dma_fence_init(&work->dma,
-		       &clear_pages_work_ops,
-		       &fence_lock,
-		       i915->mm.unordered_timeline,
-		       0);
+	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
 	i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
 	i915_gem_object_lock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index 5496f33a9064..2f6100ec2608 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
 
 	i915_sw_fence_init(&stub->chain, stub_notify);
 	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-		       to_i915(obj->base.dev)->mm.unordered_timeline,
-		       0);
+		       0, 0);
 
 	if (i915_sw_fence_await_reservation(&stub->chain,
 					    obj->base.resv, NULL,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0f81ced0a062..b6032af904bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -748,8 +748,6 @@ struct i915_gem_mm {
 	 */
 	struct workqueue_struct *userptr_wq;
 
-	u64 unordered_timeline;
-
 	/** Bit 6 swizzling required for X tiling */
 	u32 bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdf2dbf84aa3..22021da28239 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1411,8 +1411,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		mkwrite_device_info(dev_priv)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
 	intel_timelines_init(dev_priv);
 
 	ret = i915_gem_init_userptr(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8fe37ab30d90..86c8dd44216b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -915,7 +915,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 			continue;
 
 		/* Squash repeated waits to the same timelines */
-		if (fence->context != rq->i915->mm.unordered_timeline &&
+		if (fence->context &&
 		    intel_timeline_sync_is_later(rq->timeline, fence))
 			continue;
 
@@ -929,7 +929,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 			return ret;
 
 		/* Record the latest fence used against each timeline */
-		if (fence->context != rq->i915->mm.unordered_timeline)
+		if (fence->context)
 			intel_timeline_sync_set(rq->timeline, fence);
 	} while (--nchild);
 
-- 
2.23.0.rc1


