[PATCH 2/2] drm/i915: Remove intel_timeline_hwsp

Maarten Lankhorst maarten.lankhorst at linux.intel.com
Wed Oct 21 08:10:47 UTC 2020


Now that the HWSP page is no longer shared between timelines, the
indirection through struct intel_timeline_hwsp is no longer needed;
remove it and release the backing vma directly.
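As a rough schematic of the ownership change (these are not the real
i915 definitions; the structs are renamed and trimmed to the fields this
patch touches, purely for illustration):

/* Before: cacheline -> intel_timeline_hwsp -> vma, with a bitmap
 * tracking which cachelines of the shared page are still free.
 */
struct i915_vma;	/* opaque for this sketch */

struct old_intel_timeline_hwsp {
	struct i915_vma *vma;
	unsigned long long free_bitmap;
};

struct old_intel_timeline_cacheline {
	struct old_intel_timeline_hwsp *hwsp;
	void *vaddr;
};

/* After: the cacheline holds a reference to the timeline's own HWSP
 * vma directly (i915_vma_get(tl->hwsp_ggtt)), so the intermediate
 * bookkeeping struct can go away.
 */
struct new_intel_timeline_cacheline {
	struct i915_vma *vma;
	void *vaddr;
};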

Use a rolling counter to pick the next cacheline on seqno wraparound,
which prevents a circular dependency between intel_timeline and
intel_timeline_cacheline.
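
A minimal, userspace-compilable sketch of the counter's wrap behaviour
(struct and function names here are made up for illustration; the
PAGE_SIZE/CACHELINE_BYTES values mirror the driver's 4096/64, giving
64 slots per page):

#include <stdio.h>

#define PAGE_SIZE       4096u
#define CACHELINE_BYTES 64u

struct fake_timeline {
	unsigned int hwsp_cl_counter;
};

/* Hand out the current slot, then advance and wrap within one page. */
static unsigned int next_cacheline(struct fake_timeline *tl)
{
	unsigned int cacheline = tl->hwsp_cl_counter;

	if (++tl->hwsp_cl_counter == PAGE_SIZE / CACHELINE_BYTES)
		tl->hwsp_cl_counter = 0;

	return cacheline;
}

int main(void)
{
	struct fake_timeline tl = { 0 };
	int i;

	/* 66 wraps: slots 0..63 are used, then we cycle back to 0, 1. */
	for (i = 0; i < 66; i++)
		printf("wrap %d -> cacheline %u\n", i, next_cacheline(&tl));

	return 0;
}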

Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_timeline.c      | 141 ++++--------------
 .../gpu/drm/i915/gt/intel_timeline_types.h    |   5 +-
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |  11 +-
 3 files changed, 39 insertions(+), 118 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 2f49551c43bc..4796b9fd0a20 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -18,16 +18,9 @@
 #define CACHELINE_BITS 6
 #define CACHELINE_FREE CACHELINE_BITS
 
-struct intel_timeline_hwsp {
-	struct intel_gt *gt;
-	struct intel_gt_timelines *gt_timelines;
-	struct i915_vma *vma;
-	u64 free_bitmap;
-};
-
-static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
+static struct i915_vma *hwsp_alloc(struct intel_timeline *timeline)
 {
-	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_private *i915 = timeline->gt->i915;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
@@ -37,70 +30,13 @@ static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	vma = i915_vma_instance(obj, &timeline->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma))
 		i915_gem_object_put(obj);
 
 	return vma;
 }
 
-static struct i915_vma *
-hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
-{
-	struct intel_gt_timelines *gt = &timeline->gt->timelines;
-	struct intel_timeline_hwsp *hwsp = NULL;
-
-	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
-
-	/* hwsp_free_list only contains HWSP that have available cachelines */
-	if (timeline->hwsp_cacheline)
-		hwsp = timeline->hwsp_cacheline->hwsp;
-
-	if (!hwsp || !hwsp->free_bitmap) {
-		struct i915_vma *vma;
-
-		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
-		if (!hwsp)
-			return ERR_PTR(-ENOMEM);
-
-		vma = __hwsp_alloc(timeline->gt);
-		if (IS_ERR(vma)) {
-			kfree(hwsp);
-			return vma;
-		}
-
-		GT_TRACE(timeline->gt, "new HWSP allocated\n");
-
-		vma->private = hwsp;
-		hwsp->gt = timeline->gt;
-		hwsp->vma = vma;
-		hwsp->free_bitmap = ~0ull;
-		hwsp->gt_timelines = gt;
-
-	}
-
-	GEM_BUG_ON(!hwsp->free_bitmap);
-	*cacheline = __ffs64(hwsp->free_bitmap);
-	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
-
-	GEM_BUG_ON(hwsp->vma->private != hwsp);
-	return hwsp->vma;
-}
-
-static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
-{
-	/* As a cacheline becomes available, publish the HWSP on the freelist */
-
-	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
-	hwsp->free_bitmap |= BIT_ULL(cacheline);
-
-	/* And if no one is left using it, give the page back to the system */
-	if (hwsp->free_bitmap == ~0ull) {
-		i915_vma_put(hwsp->vma);
-		kfree(hwsp);
-	}
-}
-
 static void __rcu_cacheline_free(struct rcu_head *rcu)
 {
 	struct intel_timeline_cacheline *cl =
@@ -114,9 +50,8 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
 	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
 
-	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
-	i915_vma_put(cl->hwsp->vma);
-	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+	i915_gem_object_unpin_map(cl->vma->obj);
+	i915_vma_put(cl->vma);
 
 	call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
@@ -127,7 +62,7 @@ static void __cacheline_retire(struct i915_active *active)
 	struct intel_timeline_cacheline *cl =
 		container_of(active, typeof(*cl), active);
 
-	i915_vma_unpin(cl->hwsp->vma);
+	i915_vma_unpin(cl->vma);
 	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
 		__idle_cacheline_free(cl);
 }
@@ -137,12 +72,12 @@ static int __cacheline_active(struct i915_active *active)
 	struct intel_timeline_cacheline *cl =
 		container_of(active, typeof(*cl), active);
 
-	__i915_vma_pin(cl->hwsp->vma);
+	__i915_vma_pin(cl->vma);
 	return 0;
 }
 
 static struct intel_timeline_cacheline *
-cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+cacheline_alloc(struct intel_timeline *tl, unsigned int cacheline)
 {
 	struct intel_timeline_cacheline *cl;
 	void *vaddr;
@@ -153,14 +88,13 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 	if (!cl)
 		return ERR_PTR(-ENOMEM);
 
-	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
+	vaddr = i915_gem_object_pin_map(tl->hwsp_ggtt->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		kfree(cl);
 		return ERR_CAST(vaddr);
 	}
 
-	i915_vma_get(hwsp->vma);
-	cl->hwsp = hwsp;
+	cl->vma = i915_vma_get(tl->hwsp_ggtt);
 	cl->vaddr = page_pack_bits(vaddr, cacheline);
 
 	i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
@@ -193,6 +127,11 @@ static void cacheline_free(struct intel_timeline_cacheline *cl)
 	i915_active_release(&cl->active);
 }
 
+static void hwsp_free(struct intel_timeline *timeline)
+{
+	i915_vma_put(timeline->hwsp_ggtt);
+}
+
 static int intel_timeline_init(struct intel_timeline *timeline,
 			       struct intel_gt *gt,
 			       struct i915_vma *hwsp,
@@ -210,15 +149,17 @@ static int intel_timeline_init(struct intel_timeline *timeline,
 
 	if (!hwsp) {
 		struct intel_timeline_cacheline *cl;
-		unsigned int cacheline;
+		const unsigned int cacheline = 0;
 
-		hwsp = hwsp_alloc(timeline, &cacheline);
+		hwsp = hwsp_alloc(timeline);
 		if (IS_ERR(hwsp))
 			return PTR_ERR(hwsp);
 
-		cl = cacheline_alloc(hwsp->private, cacheline);
+		timeline->hwsp_ggtt = hwsp;
+
+		cl = cacheline_alloc(timeline, cacheline);
 		if (IS_ERR(cl)) {
-			__idle_hwsp_free(hwsp->private, cacheline);
+			hwsp_free(timeline);
 			return PTR_ERR(cl);
 		}
 
@@ -231,12 +172,13 @@ static int intel_timeline_init(struct intel_timeline *timeline,
 		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
 		if (IS_ERR(vaddr))
 			return PTR_ERR(vaddr);
+
+		timeline->hwsp_ggtt = i915_vma_get(hwsp);
 	}
 
 	timeline->hwsp_seqno =
 		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
 
-	timeline->hwsp_ggtt = i915_vma_get(hwsp);
 	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
 
 	timeline->fence_context = dma_fence_context_alloc(1);
@@ -270,7 +212,7 @@ static void intel_timeline_fini(struct intel_timeline *timeline)
 	else
 		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
 
-	i915_vma_put(timeline->hwsp_ggtt);
+	hwsp_free(timeline);
 }
 
 struct intel_timeline *
@@ -418,7 +360,6 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 {
 	struct intel_timeline_cacheline *cl;
 	unsigned int cacheline;
-	struct i915_vma *vma;
 	void *vaddr;
 	int err;
 
@@ -443,26 +384,13 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	 * That seems unlikely for a busy timeline that needed to wrap in
 	 * the first place, so just replace the cacheline.
 	 */
+	cacheline = tl->hwsp_cl_counter;
 
-	vma = hwsp_alloc(tl, &cacheline);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_rollback;
-	}
-
-	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
-	if (err) {
-		__idle_hwsp_free(vma->private, cacheline);
-		goto err_rollback;
-	}
-
-	cl = cacheline_alloc(vma->private, cacheline);
+	cl = cacheline_alloc(tl, cacheline);
 	if (IS_ERR(cl)) {
 		err = PTR_ERR(cl);
-		__idle_hwsp_free(vma->private, cacheline);
-		goto err_unpin;
+		goto err_rollback;
 	}
-	GEM_BUG_ON(cl->hwsp->vma != vma);
 
 	/*
 	 * Attach the old cacheline to the current request, so that we only
@@ -475,20 +403,19 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	if (err)
 		goto err_cacheline;
 
+	/* Now that we have advanced, bump the rolling cacheline counter */
+	if (++tl->hwsp_cl_counter == PAGE_SIZE / CACHELINE_BYTES)
+		tl->hwsp_cl_counter = 0;
+
 	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
 	cacheline_free(tl->hwsp_cacheline);
 
-	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
-	i915_vma_put(tl->hwsp_ggtt);
-
-	tl->hwsp_ggtt = i915_vma_get(vma);
-
 	vaddr = page_mask_bits(cl->vaddr);
 	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
 	tl->hwsp_seqno =
 		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
 
-	tl->hwsp_offset += i915_ggtt_offset(vma);
+	tl->hwsp_offset += i915_ggtt_offset(tl->hwsp_ggtt);
 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
 		 tl->fence_context, tl->hwsp_offset);
 
@@ -501,8 +428,6 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 
 err_cacheline:
 	cacheline_free(cl);
-err_unpin:
-	i915_vma_unpin(vma);
 err_rollback:
 	timeline_rollback(tl);
 	return err;
@@ -550,7 +475,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 	if (err)
 		goto out;
 
-	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+	*hwsp = i915_ggtt_offset(cl->vma) +
 		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
 
 out:
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 02181c5020db..1328e681e859 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -18,7 +18,6 @@
 struct i915_vma;
 struct i915_syncmap;
 struct intel_gt;
-struct intel_timeline_hwsp;
 
 struct intel_timeline {
 	u64 fence_context;
@@ -47,7 +46,7 @@ struct intel_timeline {
 
 	const u32 *hwsp_seqno;
 	struct i915_vma *hwsp_ggtt;
-	u32 hwsp_offset;
+	u32 hwsp_offset, hwsp_cl_counter;
 
 	struct intel_timeline_cacheline *hwsp_cacheline;
 
@@ -91,7 +90,7 @@ struct intel_timeline {
 struct intel_timeline_cacheline {
 	struct i915_active active;
 
-	struct intel_timeline_hwsp *hwsp;
+	struct i915_vma *vma;
 	void *vaddr;
 
 	struct rcu_head rcu;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 19c2cb166e7c..98cd161b3925 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -664,7 +664,7 @@ static int live_hwsp_wrap(void *arg)
 	if (IS_ERR(tl))
 		return PTR_ERR(tl);
 
-	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+	if (!tl->has_initial_breadcrumb)
 		goto out_free;
 
 	err = intel_timeline_pin(tl, NULL);
@@ -780,9 +780,7 @@ static int live_hwsp_rollover_kernel(void *arg)
 		}
 
 		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
-		tl->seqno = 0;
-		timeline_rollback(tl);
-		timeline_rollback(tl);
+		tl->seqno = -2u;
 		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
 
 		for (i = 0; i < ARRAY_SIZE(rq); i++) {
@@ -862,11 +860,10 @@ static int live_hwsp_rollover_user(void *arg)
 			goto out;
 
 		tl = ce->timeline;
-		if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+		if (!tl->has_initial_breadcrumb)
 			goto out;
 
-		timeline_rollback(tl);
-		timeline_rollback(tl);
+		tl->seqno = -4u;
 		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
 
 		for (i = 0; i < ARRAY_SIZE(rq); i++) {
-- 
2.28.0


