[PATCH 5/6] drm/i915/gvt: Refine error handling path in dispatch_workload
Zhenyu Wang
zhenyuw at linux.intel.com
Wed Aug 9 03:19:01 UTC 2017
On 2017.08.08 23:31:04 +0800, fred gao wrote:
> When an error occurs in dispatch_workload, do the proper cleanup and
> roll back to the original state before the workload is abandoned.
>
> v2:
> - split the mixed several error paths for better review. (Zhenyu)
>
> Signed-off-by: fred gao <fred.gao at intel.com>
> ---
> drivers/gpu/drm/i915/gvt/scheduler.c | 63 ++++++++++++++++++++++++++++--------
> 1 file changed, 50 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 273a275..db49a61 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -188,16 +188,17 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
> {
> struct intel_vgpu *vgpu = workload->vgpu;
> int ring_id = workload->ring_id;
> - struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
> void *shadow_ring_buffer_va;
> u32 *cs;
> + int ret = 0;
>
> /* allocate shadow ring buffer */
> cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
> if (IS_ERR(cs)) {
> gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
> workload->rb_len);
> - return PTR_ERR(cs);
> + ret = PTR_ERR(cs);
> + goto out;
> }
No need for this change; the original early return is good enough.
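i.e. just keep the early return as it is today, roughly like this (a sketch
based on the removed lines above, not compile-tested):

	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
				workload->rb_len);
		return PTR_ERR(cs);	/* no ret/goto out needed here */
	}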
>
> shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
> @@ -211,10 +212,10 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
> cs += workload->rb_len / sizeof(u32);
> intel_ring_advance(workload->req, cs);
>
> - return 0;
> +out:
> + return ret;
> }
>
> -
> /**
> * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
> * shadow it as well, include ringbuffer,wa_ctx and ctx.
> @@ -243,13 +244,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>
> ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
> if (ret)
> - goto out;
> + goto err_scan;
>
> if ((workload->ring_id == RCS) &&
> (workload->wa_ctx.indirect_ctx.size != 0)) {
> ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
> if (ret)
> - goto out;
> + goto err_scan;
> }
>
> /* pin shadow context by gvt even the shadow context will be pinned
> @@ -268,28 +269,61 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>
> ret = populate_shadow_context(workload);
> if (ret)
> - goto err_shadow;
> + goto err_unpin;
>
> rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
> if (IS_ERR(rq)) {
> gvt_vgpu_err("fail to allocate gem request\n");
> ret = PTR_ERR(rq);
> - goto err_shadow;
> + goto err_unpin;
> }
>
> gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
>
> workload->req = i915_gem_request_get(rq);
> - copy_workload_to_ring_buffer(workload);
> + ret = copy_workload_to_ring_buffer(workload);
> + if (ret)
> + goto err_copy;
This handling should be added in the previous patch in this series, the one that
introduces copy_workload_to_ring_buffer, rather than fixed up in a separate patch.
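i.e. the patch that adds copy_workload_to_ring_buffer should already carry the
error check and the err_copy unwind, roughly (sketch, not compile-tested):

	workload->req = i915_gem_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_copy;

so this patch only needs to refine the remaining error paths.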
>
> workload->shadowed = true;
> + return 0;
>
> +err_copy:
> + i915_add_request(workload->req);
> + workload->req = NULL;
> +err_unpin:
> + engine->context_unpin(engine, shadow_ctx);
> err_shadow:
> intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
> -out:
> +err_scan:
> return ret;
> }
>
> +static void intel_gvt_release_shadow_workload(struct intel_vgpu_workload *workload)
> +{
> + int ring_id = workload->ring_id;
> + struct intel_vgpu *vgpu = workload->vgpu;
> + struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
> + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> + struct intel_engine_cs *engine = dev_priv->engine[ring_id];
> +
> +	/* error happened in workload->prepare, so the workload is already shadowed */
> + if (!workload->shadowed)
> + return;
> +
> +	/* free the dynamic ring buffer every time to save memory */
> + if (vgpu->dynamic_ring_buffer_va[ring_id]) {
> + free_pages((unsigned long)vgpu->dynamic_ring_buffer_va[ring_id],
> + get_order(workload->rb_len));
> + vgpu->dynamic_ring_buffer_va[workload->ring_id] = NULL;
> + }
What's this? Have you tested this series?
> +
> + i915_add_request(workload->req);
> + workload->req = NULL;
> + engine->context_unpin(engine, shadow_ctx);
> + intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
> +}
> +
> static int dispatch_workload(struct intel_vgpu_workload *workload)
> {
> int ring_id = workload->ring_id;
> @@ -303,15 +337,18 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>
> ret = intel_gvt_scan_and_shadow_workload(workload);
> if (ret)
> - goto out;
> + goto err_scan;
>
> if (workload->prepare) {
> ret = workload->prepare(workload);
> if (ret)
> - goto out;
> + goto err_shadow;
> }
>
> -out:
> +err_shadow:
> + if (ret)
> + intel_gvt_release_shadow_workload(workload);
> +err_scan:
> if (ret)
> workload->status = ret;
>
> --
> 2.7.4
>
--
Open Source Technology Center, Intel ltd.
$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827