[Intel-gfx] [PATCH v4 5/7] drm/i915/guc: don't spinwait if the GuC's workqueue is full
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Fri May 13 15:32:43 UTC 2016
On 13/05/16 15:36, Dave Gordon wrote:
> Rather than wait to see whether more space becomes available in the GuC
> submission workqueue, we can just return -EAGAIN and let the caller try
> again in a little while. This gets rid of an uninterruptible sleep in
> the polling code :)
>
> We'll also add a counter to the GuC client statistics, to see how often
> we find the WQ full.
>
> v2:
> Flag the likely() code path (Tvrtko Ursulin).
>
> v4:
> Add/update comments about failure counters (Tvrtko Ursulin).
>
> Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
> ---
> drivers/gpu/drm/i915/i915_debugfs.c | 1 +
> drivers/gpu/drm/i915/i915_guc_submission.c | 18 +++++++-----------
> drivers/gpu/drm/i915/intel_guc.h | 22 ++++++++++++++++------
> 3 files changed, 24 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 24f4105..de05841 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2506,6 +2506,7 @@ static void i915_guc_client_info(struct seq_file *m,
> seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
> client->wq_size, client->wq_offset, client->wq_tail);
>
> + seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
> seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
> seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
> seq_printf(m, "\tLast submission result: %d\n", client->retcode);
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 87cb739..85b2b89 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -468,26 +468,22 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
> */
> int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
> {
> - const size_t size = sizeof(struct guc_wq_item);
> + const size_t wqi_size = sizeof(struct guc_wq_item);
> struct i915_guc_client *gc = request->i915->guc.execbuf_client;
> struct guc_process_desc *desc;
> - int ret = -ETIMEDOUT, timeout_counter = 200;
> + u32 freespace;
>
> GEM_BUG_ON(gc == NULL);
>
> desc = gc->client_base + gc->proc_desc_offset;
>
> - while (timeout_counter-- > 0) {
> - if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
> - ret = 0;
> - break;
> - }
> + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
> + if (likely(freespace >= wqi_size))
> + return 0;
>
> - if (timeout_counter)
> - usleep_range(1000, 2000);
> - };
> + gc->no_wq_space += 1;
>
> - return ret;
> + return -EAGAIN;
> }
>
> static int guc_add_workqueue_item(struct i915_guc_client *gc,
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 91f315c..380a743 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -48,9 +48,18 @@ struct drm_i915_gem_request;
> * queue (a circular array of work items), again described in the process
> * descriptor. Work queue pages are mapped momentarily as required.
> *
> - * Finally, we also keep a few statistics here, including the number of
> - * submissions to each engine, and a record of the last submission failure
> - * (if any).
> + * We also keep a few statistics on failures. Ideally, these should all
> + * be zero!
> + * no_wq_space: times that the submission pre-check found no space was
> + * available in the work queue (note, the queue is shared,
> + * not per-engine). It is OK for this to be nonzero, but
> + * it should not be huge!
> + * q_fail: failed to enqueue a work item. This should never happen,
> + * because we check for space beforehand.
> + * b_fail: failed to ring the doorbell. This should never happen, unless
> + * somehow the hardware misbehaves, or maybe if the GuC firmware
> + * crashes? We probably need to reset the GPU to recover.
> + * retcode: errno from last guc_submit()
> */
> struct i915_guc_client {
> struct drm_i915_gem_object *client_obj;
> @@ -71,12 +80,13 @@ struct i915_guc_client {
> uint32_t wq_tail;
> uint32_t unused; /* Was 'wq_head' */
>
> - /* GuC submission statistics & status */
> - uint64_t submissions[GUC_MAX_ENGINES_NUM];
> + uint32_t no_wq_space;
> uint32_t q_fail;
> uint32_t b_fail;
> int retcode;
> - int spare; /* pad to 32 DWords */
> +
> + /* Per-engine counts of GuC submissions */
> + uint64_t submissions[GUC_MAX_ENGINES_NUM];
> };
>
> enum intel_guc_fw_status {
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko
More information about the Intel-gfx
mailing list