[PATCH 5/6] drm/i915/gvt: Refine error handling path in dispatch_workload

fred gao fred.gao at intel.com
Tue Aug 8 15:31:04 UTC 2017


When an error occurs in dispatch_workload, do the proper cleanup and
roll back to the original state before the workload is abandoned.
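
The cleanup follows the usual kernel goto-unwind pattern: each error
label releases everything acquired before the failing step, in reverse
order of acquisition. A minimal sketch of the pattern (placeholder
names, not the actual gvt functions):

	static int setup_workload(void)
	{
		int ret;

		ret = step_one();
		if (ret)
			return ret;		/* nothing acquired yet */

		ret = step_two();
		if (ret)
			goto err_undo_one;	/* undo step_one only */

		ret = step_three();
		if (ret)
			goto err_undo_two;	/* undo step_two, then step_one */

		return 0;

	err_undo_two:
		undo_two();
	err_undo_one:
		undo_one();			/* labels fall through in order */
		return ret;
	}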

v2:
- split the mixed-together error paths into separate ones for easier review. (Zhenyu)

Signed-off-by: fred gao <fred.gao at intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 63 ++++++++++++++++++++++++++++--------
 1 file changed, 50 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 273a275..db49a61 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -188,16 +188,17 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
+	int ret = 0;
 
 	/* allocate shadow ring buffer */
 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
 	if (IS_ERR(cs)) {
 		gvt_vgpu_err("fail to alloc size =%ld shadow  ring buffer\n",
 			workload->rb_len);
-		return PTR_ERR(cs);
+		ret = PTR_ERR(cs);
+		goto out;
 	}
 
 	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
@@ -211,10 +212,10 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	cs += workload->rb_len / sizeof(u32);
 	intel_ring_advance(workload->req, cs);
 
-	return 0;
+out:
+	return ret;
 }
 
-
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -243,13 +244,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto out;
+		goto err_scan;
 
 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto out;
+			goto err_scan;
 	}
 
 	/* pin shadow context by gvt even the shadow context will be pinned
@@ -268,28 +269,61 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto err_shadow;
+		goto err_unpin;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		ret = PTR_ERR(rq);
-		goto err_shadow;
+		goto err_unpin;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
 
 	workload->req = i915_gem_request_get(rq);
-	copy_workload_to_ring_buffer(workload);
+	ret = copy_workload_to_ring_buffer(workload);
+	if (ret)
+		goto err_copy;
 
 	workload->shadowed = true;
+	return 0;
 
+err_copy:
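+	/* an allocated request must always be submitted, even on error */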
+	i915_add_request(workload->req);
+	workload->req = NULL;
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
 err_shadow:
 	intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
-out:
+err_scan:
 	return ret;
 }
 
+static void intel_gvt_release_shadow_workload(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+
+	/* if the error happened before shadowing, there is nothing to release */
+	if (!workload->shadowed)
+		return;
+
+	/* free the dynamically allocated ring buffer each time to save memory */
+	if (vgpu->dynamic_ring_buffer_va[ring_id]) {
+		free_pages((unsigned long)vgpu->dynamic_ring_buffer_va[ring_id],
+				get_order(workload->rb_len));
+		vgpu->dynamic_ring_buffer_va[ring_id] = NULL;
+	}
+
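+	/* undo what scan_and_shadow acquired: request, context pin, wa_ctx */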
+	i915_add_request(workload->req);
+	workload->req = NULL;
+	engine->context_unpin(engine, shadow_ctx);
+	intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
+}
+
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	int ring_id = workload->ring_id;
@@ -303,15 +337,18 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
-		goto out;
+		goto err_scan;
 
 	if (workload->prepare) {
 		ret = workload->prepare(workload);
 		if (ret)
-			goto out;
+			goto err_shadow;
 	}
 
-out:
+err_shadow:
+	if (ret)
+		intel_gvt_release_shadow_workload(workload);
+err_scan:
 	if (ret)
 		workload->status = ret;
 
-- 
2.7.4


