[PATCH 5/6] drm/i915/gvt: Refine error handling in dispatch_workload
fred gao
fred.gao at intel.com
Thu Aug 10 15:31:39 UTC 2017
When an error occurs in dispatch_workload, this patch performs the
proper cleanup and rolls back to the original state before the
workload is abandoned.
v2:
- split the mixed several error paths for better review. (Zhenyu)
v3:
- original PTR_ERR(cs) is good and code cleanup. (Zhenyu)
Signed-off-by: fred gao <fred.gao at intel.com>
---
drivers/gpu/drm/i915/gvt/scheduler.c | 44 ++++++++++++++++++++++++++++--------
1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index aeb99cdac..26c955d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -263,13 +263,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto out;
+ goto err_scan;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto out;
+ goto err_scan;
}
/* pin shadow context by gvt even the shadow context will be pinned
@@ -288,13 +288,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = populate_shadow_context(workload);
if (ret)
- goto err_shadow;
+ goto err_unpin;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
- goto err_shadow;
+ goto err_unpin;
}
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@ -302,15 +302,40 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 workload->req = i915_gem_request_get(rq);
 ret = copy_workload_to_ring_buffer(workload);
 if (ret)
- goto err_shadow;
+ goto err_copy;
 workload->shadowed = true;
+ /* Success: return before the error-unwind labels. Without this,
+  * a successful shadow falls through into err_copy and tears down
+  * the request/context that dispatch_workload still needs.
+  */
+ return 0;
+
+err_copy:
+ i915_add_request(workload->req);
+ workload->req = NULL;
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
err_shadow:
 release_shadow_wa_ctx(&workload->wa_ctx);
-out:
+err_scan:
 return ret;
}
+static void intel_gvt_release_shadow_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+
+ /* not yet shadowed: scan/shadow failed and unwound its own state */
+ if (!workload->shadowed)
+ return;
+
+ i915_add_request(workload->req);
+ workload->req = NULL;
+ engine->context_unpin(engine, shadow_ctx);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+}
+
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
@@ -324,15 +347,18 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
- goto out;
+ goto err_scan;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
- goto out;
+ goto err_shadow;
}
-out:
+err_shadow:
+ if (ret)
+ intel_gvt_release_shadow_workload(workload);
+err_scan:
if (ret)
workload->status = ret;
--
2.7.4
More information about the intel-gvt-dev
mailing list