[Intel-gfx] [PATCH 22/36] drm/i915: Serialise the fill BLT with the vma pinning

Chris Wilson chris at chris-wilson.co.uk
Thu Aug 29 08:11:36 UTC 2019


Make sure that we wait for the vma to be pinned prior to telling the GPU
to fill the pages through that vma.

However, since our async operations all fight over obj->resv->excl_fence,
we must order them manually. That makes the code much more fragile and
gives an outside observer the chance to see the intermediate fences. To
be discussed!
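
Roughly, the ordering this establishes (a simplified sketch of the
resulting flow in i915_gem_client_blt.c, not the literal code; error
unwinding, request construction and the dma-fence plumbing are elided,
see the diff below):

	static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
						 /* ... */)
	{
		/* ... obtain the proxy vma for the object's pages ... */

		/* Pin (and so bind) the vma before any work is queued. */
		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (unlikely(err))
			goto err_free;

		/* The worker owns a reference for the sleeve's lifetime. */
		sleeve->vma = i915_vma_get(vma);
		/* ... */
	}

	static void clear_pages_worker(struct work_struct *work)
	{
		/* Take ownership of the already-pinned vma. */
		struct i915_vma *vma = fetch_and_zero(&w->sleeve->vma);

		/* The vma is bound, so the fill BLT can be emitted directly. */
		batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
		/* ... build and add the request, signal w->dma on error ... */

		/* Drop the pin and reference only once we are done with it. */
		i915_vma_unpin(vma);
		i915_vma_put(vma);
	}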

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld at intel.com>
Reviewed-by: Matthew Auld <matthew.auld at intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_client_blt.c    | 46 ++++++++++++++-----
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |  3 +-
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index ec4ae2c5eb29..c673cc26dbad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -71,10 +71,30 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 		goto err_free;
 	}
 
+	/*
+	 * XXX fix scheduling with get_pages & clear workers
+	 *
+	 * The complication is that we end up overwriting the same
+	 * obj->resv->excl_fence for each stage of the operation. That fence
+	 * should be set on scheduling the work, and only signaled upon
+	 * completion of the entire workqueue.
+	 *
+	 * Within the workqueue, we use the fence to schedule each individual
+	 * task. Each individual task knows to use obj->resv->fence.
+	 *
+	 * To an outsider, they must wait until the end and so the
+	 * obj->resv->fence must be the composite.
+	 *
+	 * Ideas?
+	 */
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto err_free;
+
 	vma->private = sleeve;
 	vma->ops = &proxy_vma_ops;
 
-	sleeve->vma = vma;
+	sleeve->vma = i915_vma_get(vma);
 	sleeve->pages = pages;
 	sleeve->page_sizes = *page_sizes;
 
@@ -87,6 +107,13 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 
 static void destroy_sleeve(struct i915_sleeve *sleeve)
 {
+	struct i915_vma *vma = sleeve->vma;
+
+	if (vma) {
+		i915_vma_unpin(vma);
+		i915_vma_put(vma);
+	}
+
 	kfree(sleeve);
 }
 
@@ -175,8 +202,8 @@ static int move_to_active(struct i915_vma *vma, struct i915_request *rq)
 static void clear_pages_worker(struct work_struct *work)
 {
 	struct clear_pages_work *w = container_of(work, typeof(*w), work);
-	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
-	struct i915_vma *vma = w->sleeve->vma;
+	struct i915_vma *vma = fetch_and_zero(&w->sleeve->vma);
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	int err = w->dma.error;
@@ -186,20 +213,16 @@ static void clear_pages_worker(struct work_struct *work)
 
 	if (obj->cache_dirty) {
 		if (i915_gem_object_has_struct_page(obj))
-			drm_clflush_sg(w->sleeve->pages);
+			drm_clflush_sg(vma->pages);
 		obj->cache_dirty = false;
 	}
 	obj->read_domains = I915_GEM_GPU_DOMAINS;
 	obj->write_domain = 0;
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_signal;
-
 	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto out_unpin;
+		goto out_signal;
 	}
 
 	rq = intel_context_create_request(w->ce);
@@ -244,14 +267,15 @@ static void clear_pages_worker(struct work_struct *work)
 	i915_request_add(rq);
 out_batch:
 	intel_emit_vma_release(w->ce, batch);
-out_unpin:
-	i915_vma_unpin(vma);
 out_signal:
 	if (unlikely(err)) {
 		dma_fence_set_error(&w->dma, err);
 		dma_fence_signal(&w->dma);
 		dma_fence_put(&w->dma);
 	}
+
+	i915_vma_unpin(vma);
+	i915_vma_put(vma);
 }
 
 static int __i915_sw_fence_call
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 855e97ccaf9f..bbda85dcaa42 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -941,13 +941,12 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
 
 	*cs++ = MI_FLUSH;
-
 	*cs++ = MI_STORE_DWORD_INDEX;
 	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
 	*cs++ = rq->fence.seqno;
 
+	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	*cs++ = MI_USER_INTERRUPT;
-	*cs++ = MI_NOOP;
 
 	rq->tail = intel_ring_offset(rq, cs);
 	assert_ring_tail_valid(rq->ring, rq->tail);
-- 
2.23.0


