[Intel-gfx] [PATCH v4] drm/i915/guc: Move GuC wq_check_space to alloc_request_extras
Daniel Vetter
daniel at ffwll.ch
Tue Jan 5 02:07:53 PST 2016
On Wed, Dec 23, 2015 at 03:39:04PM +0000, Dave Gordon wrote:
> On 16/12/15 19:45, yu.dai at intel.com wrote:
> >From: Alex Dai <yu.dai at intel.com>
> >
> >Split the GuC work queue space check out of submission and move it to
> >ring_alloc_request_extras. The reason is that a failure in the later
> >i915_add_request() call cannot be handled there. If the wait for work
> >queue space times out at request-allocation time instead, the driver
> >can return early and handle the error. (A standalone sketch of the
> >space check follows the quoted patch below.)
> >
> >v1: Move wq_reserve_space to ring_reserve_space
> >v2: Move wq_reserve_space to alloc_request_extras (Chris Wilson)
> >v3: The work queue head pointer is now cached by the driver, so we can
> > return quickly if space is available.
> > s/reserve/check/g (Dave Gordon)
> >v4: Update the cached wq head after ringing the doorbell; check wq
> > space before ringing the doorbell in case an unexpected error
> > happens; call the wq space check only when GuC submission is
> > enabled. (Dave Gordon)
> >
> >Signed-off-by: Alex Dai <yu.dai at intel.com>
>
> LGTM.
> Reviewed-by: Dave Gordon <david.s.gordon at intel.com>
Queued for -next, thanks for the patch.
-Daniel
>
> >diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> >index ef20071..7554d16 100644
> >--- a/drivers/gpu/drm/i915/i915_guc_submission.c
> >+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> >@@ -247,6 +247,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
> > db_exc.cookie = 1;
> > }
> >
> >+ /* Finally, update the cached copy of the GuC's WQ head */
> >+ gc->wq_head = desc->head;
> >+
> > kunmap_atomic(base);
> > return ret;
> > }
> >@@ -472,28 +475,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
> > sizeof(desc) * client->ctx_index);
> > }
> >
> >-/* Get valid workqueue item and return it back to offset */
> >-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
> >+int i915_guc_wq_check_space(struct i915_guc_client *gc)
> > {
> > struct guc_process_desc *desc;
> > void *base;
> > u32 size = sizeof(struct guc_wq_item);
> > int ret = -ETIMEDOUT, timeout_counter = 200;
> >
> >+ if (!gc)
> >+ return 0;
> >+
> >+ /* Return quickly if wq space has been available since the last time
> >+ * we cached the head position. */
> >+ if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
> >+ return 0;
> >+
> > base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
> > desc = base + gc->proc_desc_offset;
> >
> > while (timeout_counter-- > 0) {
> >- if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
> >- *offset = gc->wq_tail;
> >+ gc->wq_head = desc->head;
> >
> >- /* advance the tail for next workqueue item */
> >- gc->wq_tail += size;
> >- gc->wq_tail &= gc->wq_size - 1;
> >-
> >- /* this will break the loop */
> >- timeout_counter = 0;
> >+ if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
> > ret = 0;
> >+ break;
> > }
> >
> > if (timeout_counter)
> >@@ -511,12 +516,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
> > enum intel_ring_id ring_id = rq->ring->id;
> > struct guc_wq_item *wqi;
> > void *base;
> >- u32 tail, wq_len, wq_off = 0;
> >- int ret;
> >+ u32 tail, wq_len, wq_off, space;
> >+
> >+ space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
> >+ if (WARN_ON(space < sizeof(struct guc_wq_item)))
> >+ return -ENOSPC; /* shouldn't happen */
> >
> >- ret = guc_get_workqueue_space(gc, &wq_off);
> >- if (ret)
> >- return ret;
> >+ /* postincrement WQ tail for next time */
> >+ wq_off = gc->wq_tail;
> >+ gc->wq_tail += sizeof(struct guc_wq_item);
> >+ gc->wq_tail &= gc->wq_size - 1;
> >
> > /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
> > * should not have the case where structure wqi is across page, neither
> >diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> >index 0e048bf..5cf555d 100644
> >--- a/drivers/gpu/drm/i915/intel_guc.h
> >+++ b/drivers/gpu/drm/i915/intel_guc.h
> >@@ -43,6 +43,7 @@ struct i915_guc_client {
> > uint32_t wq_offset;
> > uint32_t wq_size;
> > uint32_t wq_tail;
> >+ uint32_t wq_head;
> >
> > /* GuC submission statistics & status */
> > uint64_t submissions[I915_NUM_RINGS];
> >@@ -123,5 +124,6 @@ int i915_guc_submit(struct i915_guc_client *client,
> > struct drm_i915_gem_request *rq);
> > void i915_guc_submission_disable(struct drm_device *dev);
> > void i915_guc_submission_fini(struct drm_device *dev);
> >+int i915_guc_wq_check_space(struct i915_guc_client *client);
> >
> > #endif
> >diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> >index 272f36f..cd232d2 100644
> >--- a/drivers/gpu/drm/i915/intel_lrc.c
> >+++ b/drivers/gpu/drm/i915/intel_lrc.c
> >@@ -667,6 +667,19 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
> > return ret;
> > }
> >
> >+ if (i915.enable_guc_submission) {
> >+ /*
> >+ * Check that the GuC has space for the request before
> >+ * going any further, as the i915_add_request() call
> >+ * later on mustn't fail ...
> >+ */
> >+ struct intel_guc *guc = &request->i915->guc;
> >+
> >+ ret = i915_guc_wq_check_space(guc->execbuf_client);
> >+ if (ret)
> >+ return ret;
> >+ }
> >+
> > return 0;
> > }
> >
> >
>
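For reference, below is a minimal, self-contained model of the cached-head
space check the changelog describes. This is a userspace sketch, not the
driver code: circ_space() mirrors the kernel's CIRC_SPACE() macro for a
power-of-two ring, and the shared_head parameter stands in for the
GuC-updated desc->head that the real driver reads from the process
descriptor page; the names wq_client and wq_check_space are illustrative
only.

#include <stdint.h>
#include <stdio.h>

#define WQ_SIZE		4096u			/* power of two, like gc->wq_size */
#define WQ_ITEM_SIZE	(4u * sizeof(uint32_t))	/* one work queue item is 4 DWs  */

/*
 * Free space in a power-of-two ring, computed the way CIRC_SPACE() does it:
 * one slot is always kept empty so that head == tail means "empty".
 */
static uint32_t circ_space(uint32_t producer, uint32_t consumer, uint32_t size)
{
	return (consumer - (producer + 1)) & (size - 1);
}

struct wq_client {
	uint32_t wq_tail;	/* producer offset, owned by the driver     */
	uint32_t wq_head;	/* cached copy of the consumer (GuC) offset */
};

/*
 * Fast path: if the cached head already shows enough room, return at once.
 * Slow path: refresh the cache from the shared head (the real driver kmaps
 * the process descriptor and polls this with a timeout).
 */
static int wq_check_space(struct wq_client *gc, uint32_t shared_head)
{
	if (circ_space(gc->wq_tail, gc->wq_head, WQ_SIZE) >= WQ_ITEM_SIZE)
		return 0;

	gc->wq_head = shared_head;
	if (circ_space(gc->wq_tail, gc->wq_head, WQ_SIZE) >= WQ_ITEM_SIZE)
		return 0;

	return -1;	/* the driver would keep polling, then return -ETIMEDOUT */
}

int main(void)
{
	struct wq_client gc = { .wq_tail = 0, .wq_head = 0 };

	if (wq_check_space(&gc, 0) == 0) {
		/* guc_add_workqueue_item() equivalent: claim the slot and
		 * post-increment the tail for the next item. */
		uint32_t wq_off = gc.wq_tail;

		gc.wq_tail = (gc.wq_tail + WQ_ITEM_SIZE) & (WQ_SIZE - 1);
		printf("item at offset %u, next tail %u\n",
		       (unsigned int)wq_off, (unsigned int)gc.wq_tail);
	}
	return 0;
}

The point of the fast path is that, after this patch, the submission path
no longer polls for space at all: if the cached head already shows room
for at least one work-queue item, i915_guc_wq_check_space() returns
without touching the shared page, and only otherwise does it kmap the
process descriptor and poll with a timeout, so a timeout is reported from
alloc_request_extras rather than after i915_add_request() has already run.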
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch