[Intel-gfx] [RFC 04/39] drm/i915: Prelude to splitting i915_gem_do_execbuffer in two
Daniel Vetter
daniel at ffwll.ch
Tue Jul 21 01:06:25 PDT 2015
On Fri, Jul 17, 2015 at 03:33:13PM +0100, John.C.Harrison at Intel.com wrote:
> From: John Harrison <John.C.Harrison at Intel.com>
>
> The scheduler decouples the submission of batch buffers to the driver from
> their submission to the hardware. This basically means splitting the
> execbuffer() function in half. This change rearranges some code in
> preparation for that split.
It would be nice to explain what moves and why, so reviewers don't have to
reverse-engineer the idea behind this patch.
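For reference, this is how I read the reordering of the legacy
i915_gem_ringbuffer_submission() path from the diff below (a simplified
sketch only; constants parsing, error handling, tracing and the execlists
variant are omitted, and the batch buffer start emission is elided):

    /* Before this patch (simplified): */
    intel_runtime_pm_get(dev_priv);          /* taken early in i915_gem_do_execbuffer() */
    ...
    i915_gem_execbuffer_move_to_gpu(params->request, vmas);  /* also invalidated caches */
    i915_switch_context(params->request);
    /* ... emit batch buffer start ... */
    i915_gem_execbuffer_move_to_active(vmas, params->request);
    i915_gem_execbuffer_retire_commands(params);
    ...
    intel_runtime_pm_put(dev_priv);          /* on exit from i915_gem_do_execbuffer() */

    /* After this patch (simplified): */
    i915_gem_execbuffer_move_to_gpu(params->request, vmas);  /* flush only, no cache invalidate */
    i915_gem_execbuffer_move_to_active(vmas, params->request);
    /* To be split into two functions here: everything below is the half the
     * scheduler would later run when it actually submits to the hardware. */
    intel_runtime_pm_get(dev_priv);
    intel_ring_invalidate_all_caches(params->request);
    i915_switch_context(params->request);
    /* ... emit batch buffer start ... */
    i915_gem_execbuffer_retire_commands(params);
    intel_runtime_pm_put(dev_priv);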
>
> Change-Id: Icc9c8afaac18821f3eb8a151a49f918f90c068a3
> For: VIZ-1587
> Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
Moving move_to_active around is really scary and should be a separate
patch. The reason it sits where it currently does is that move_to_active is
somewhat destructive and cannot be undone; hence it is placed past the
point of no return.
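To make the concern concrete, here is a hypothetical failure sequence with
the new ordering (sketch only, names taken from the diff below):

    i915_gem_execbuffer_move_to_active(vmas, params->request);
    /* objects are now tracked as active/busy against this request */

    ret = intel_ring_invalidate_all_caches(params->request);
    if (ret)
        goto error;    /* too late: the batch will never be emitted, but the
                        * move_to_active bookkeeping cannot be rolled back */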
Also why can't we just pull this out into generic code?
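If the common chunk were pulled out, i915_gem_ringbuffer_submission() and
intel_execlists_submission() could share something like the following
(purely hypothetical sketch, the helper name is made up; the caller would
pass intel_ring_invalidate_all_caches or logical_ring_invalidate_all_caches
respectively):

    /* Hypothetical shared helper for the duplicated sequence around the
     * split marker; not part of this patch, illustration only. */
    static int i915_gem_execbuffer_queue(struct i915_execbuffer_params *params,
                                         struct list_head *vmas,
                                         int (*invalidate)(struct drm_i915_gem_request *req))
    {
        i915_gem_execbuffer_move_to_active(vmas, params->request);

        /* To be split into two functions here... */

        /*
         * Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
        return invalidate(params->request);
    }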
-Daniel
> ---
> drivers/gpu/drm/i915/i915_gem_execbuffer.c | 57 ++++++++++++++++++------------
> drivers/gpu/drm/i915/intel_lrc.c | 18 +++++++---
> 2 files changed, 47 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index d95d472..988ecd4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -926,10 +926,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
> if (flush_domains & I915_GEM_DOMAIN_GTT)
> wmb();
>
> - /* Unconditionally invalidate gpu caches and ensure that we do flush
> - * any residual writes from the previous batch.
> - */
> - return intel_ring_invalidate_all_caches(req);
> + return 0;
> }
>
> static bool
> @@ -1253,17 +1250,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> }
> }
>
> - ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
> - if (ret)
> - goto error;
> -
> - ret = i915_switch_context(params->request);
> - if (ret)
> - goto error;
> -
> - WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
> - "%s didn't clear reload\n", ring->name);
> -
> instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
> instp_mask = I915_EXEC_CONSTANTS_MASK;
> switch (instp_mode) {
> @@ -1301,6 +1287,32 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> goto error;
> }
>
> + ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
> + if (ret)
> + goto error;
> +
> + i915_gem_execbuffer_move_to_active(vmas, params->request);
> +
> + /* To be split into two functions here... */
> +
> + intel_runtime_pm_get(dev_priv);
> +
> + /*
> + * Unconditionally invalidate gpu caches and ensure that we do flush
> + * any residual writes from the previous batch.
> + */
> + ret = intel_ring_invalidate_all_caches(params->request);
> + if (ret)
> + goto error;
> +
> + /* Switch to the correct context for the batch */
> + ret = i915_switch_context(params->request);
> + if (ret)
> + goto error;
> +
> + WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
> + "%s didn't clear reload\n", ring->name);
> +
> if (ring == &dev_priv->ring[RCS] &&
> instp_mode != dev_priv->relative_constants_mode) {
> ret = intel_ring_begin(params->request, 4);
> @@ -1344,15 +1356,20 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> exec_start, exec_len,
> params->dispatch_flags);
> if (ret)
> - return ret;
> + goto error;
> }
>
> trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
>
> - i915_gem_execbuffer_move_to_active(vmas, params->request);
> i915_gem_execbuffer_retire_commands(params);
>
> error:
> + /*
> + * intel_gpu_busy should also get a ref, so it will free when the device
> + * is really idle.
> + */
> + intel_runtime_pm_put(dev_priv);
> +
> kfree(cliprects);
> return ret;
> }
> @@ -1563,8 +1580,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
> }
> #endif
>
> - intel_runtime_pm_get(dev_priv);
> -
> ret = i915_mutex_lock_interruptible(dev);
> if (ret)
> goto pre_mutex_err;
> @@ -1759,10 +1774,6 @@ err:
> mutex_unlock(&dev->struct_mutex);
>
> pre_mutex_err:
> - /* intel_gpu_busy should also get a ref, so it will free when the device
> - * is really idle. */
> - intel_runtime_pm_put(dev_priv);
> -
> if (fd_fence_complete != -1) {
> sys_close(fd_fence_complete);
> args->rsvd2 = (__u64) -1;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 8aa9a18..89f3bcd 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -613,10 +613,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
> if (flush_domains & I915_GEM_DOMAIN_GTT)
> wmb();
>
> - /* Unconditionally invalidate gpu caches and ensure that we do flush
> - * any residual writes from the previous batch.
> - */
> - return logical_ring_invalidate_all_caches(req);
> + return 0;
> }
>
> int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
> @@ -889,6 +886,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> if (ret)
> return ret;
>
> + i915_gem_execbuffer_move_to_active(vmas, params->request);
> +
> + /* To be split into two functions here... */
> +
> + /*
> + * Unconditionally invalidate gpu caches and ensure that we do flush
> + * any residual writes from the previous batch.
> + */
> + ret = logical_ring_invalidate_all_caches(params->request);
> + if (ret)
> + return ret;
> +
> if (ring == &dev_priv->ring[RCS] &&
> instp_mode != dev_priv->relative_constants_mode) {
> ret = intel_logical_ring_begin(params->request, 4);
> @@ -913,7 +922,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>
> trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
>
> - i915_gem_execbuffer_move_to_active(vmas, params->request);
> i915_gem_execbuffer_retire_commands(params);
>
> return 0;
> --
> 1.9.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch