[PATCH 2/3] drm/i915/gvt: Correct error handling for dispatch_workload path

fred gao fred.gao at intel.com
Wed Jul 19 06:46:49 UTC 2017


When an error occurs in the dispatch_workload path, do the proper
cleanup and roll back to the original state before the workload is
abandoned.

Signed-off-by: fred gao <fred.gao at intel.com>
---
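Note for reviewers (not part of the commit message): the unwind paths
added below all follow the kernel's reverse-order goto cleanup idiom,
releasing resources in the opposite order they were acquired. As a
reference, here is a minimal, self-contained C sketch of that idiom;
acquire_a()/acquire_b()/release_a() are illustrative stand-ins (think
intel_vgpu_pin_mm and intel_vgpu_unpin_mm), not functions in the gvt
code:

    #include <stdio.h>

    static int acquire_a(void) { return 0; }   /* first step succeeds */
    static void release_a(void) { }            /* matching rollback */
    static int acquire_b(void) { return -1; }  /* a later step fails */

    static int setup_with_rollback(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto err_ret;

            ret = acquire_b();
            if (ret)
                    goto err_release_a;     /* undo in reverse order */

            return 0;

    err_release_a:
            release_a();
    err_ret:
            return ret;
    }

    int main(void)
    {
            printf("ret = %d\n", setup_with_rollback());
            return 0;
    }
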
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 36 ++++++++++++++++
 drivers/gpu/drm/i915/gvt/cmd_parser.h |  2 +
 drivers/gpu/drm/i915/gvt/execlist.c   | 79 +++++++++++++++++++++++++----------
 drivers/gpu/drm/i915/gvt/gtt.c        |  4 +-
 drivers/gpu/drm/i915/gvt/scheduler.c  | 69 +++++++++++++++++++++++-------
 5 files changed, 153 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 157a718..df300d4 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2724,6 +2724,29 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return ret;
 }
 
+/* clean up after shadow_indirect_ctx */
+static int release_shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct drm_i915_gem_object *obj;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ret = 0;
+
+	obj = wa_ctx->indirect_ctx.obj;
+	if (obj == NULL) {
+		gvt_vgpu_err("obj should not be NULL\n");
+		WARN_ON(1);
+		ret = -EINVAL;
+	} else {
+		i915_gem_object_unpin_map(obj);
+		i915_gem_object_put(obj);
+	}
+
+	return ret;
+}
+
 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
@@ -2768,6 +2791,19 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
+int intel_gvt_release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	int ret;
+
+	if (wa_ctx->indirect_ctx.size == 0)
+		return 0;
+
+	ret = release_shadow_indirect_ctx(wa_ctx);
+
+	return ret;
+}
+
+
 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
 		unsigned int opcode, int rings)
 {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 2867036..b1a1d82 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -45,5 +45,7 @@ int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
 
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+int intel_gvt_release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 95f1607..ec8ff32 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
+
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
@@ -447,26 +448,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	update_wa_ctx_2_shadow_ctx(wa_ctx);
 }
 
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
-
-	intel_vgpu_pin_mm(workload->shadow_mm);
-	intel_vgpu_sync_oos_pages(workload->vgpu);
-	intel_vgpu_flush_post_shadow(workload->vgpu);
-	prepare_shadow_batch_buffer(workload);
-	prepare_shadow_wa_ctx(&workload->wa_ctx);
-	if (!workload->emulate_schedule_in)
-		return 0;
-
-	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
-	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
-}
-
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
 	/* release all the shadow batch buffer */
@@ -496,6 +477,58 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct execlist_ctx_descriptor_format ctx[2];
+	int ring_id = workload->ring_id;
+	int ret;
+
+	ret = intel_vgpu_pin_mm(workload->shadow_mm);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu pin mm\n");
+		goto err_ret;
+	}
+
+	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu sync oos pages\n");
+		goto err_unpin_mm;
+	}
+
+	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to flush post shadow\n");
+		goto err_unpin_mm;
+	}
+
+	/* ggtt_pin may fail, so ignore the pin error */
+	prepare_shadow_batch_buffer(workload);
+	prepare_shadow_wa_ctx(&workload->wa_ctx);
+
+	if (!workload->emulate_schedule_in)
+		return 0;
+
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to emulate execlist schedule in\n");
+		goto err_unpin_wa;
+	}
+	return 0;
+
+err_unpin_wa:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+	release_shadow_batch_buffer(workload);
+err_unpin_mm:
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+
+err_ret:
+	return ret;
+}
+
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -509,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	release_shadow_batch_buffer(workload);
-	release_shadow_wa_ctx(&workload->wa_ctx);
+	if (workload->req) {
+		release_shadow_batch_buffer(workload);
+		release_shadow_wa_ctx(&workload->wa_ctx);
+	}
 
 	if (workload->status || vgpu->resetting)
 		goto out;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 66374db..8f46019 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1630,8 +1630,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 
 	if (!mm->shadowed) {
 		ret = shadow_mm(mm);
-		if (ret)
+		if (ret) {
+			atomic_dec(&mm->pincount);
 			return ret;
+		}
 	}
 
 	list_del_init(&mm->lru_list);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6d05ad4..6a7d405 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -191,13 +191,15 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
+	int ret = 0;
 
 	/* allocate shadow ring buffer */
 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
 	if (IS_ERR(cs)) {
 		gvt_vgpu_err("fail to alloc size =%ld shadow  ring buffer\n",
 			workload->rb_len);
-		return PTR_ERR(cs);
+		ret = PTR_ERR(cs);
+		goto out;
 	}
 
 	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
@@ -211,17 +213,17 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	cs += workload->rb_len / sizeof(u32);
 	intel_ring_advance(workload->req, cs);
 
-	/* free the dynamical rinb buffer every time to save the memory */
+out:
+	/* free the dynamically allocated ring buffer each time to save memory */
 	if (workload->rb_len > RESERVE_RING_BUFFER_SIZE) {
 		free_pages((unsigned long)vgpu->dynamic_ring_buffer_va[ring_id],
 				get_order(workload->rb_len));
 		 vgpu->dynamic_ring_buffer_va[workload->ring_id] = NULL;
 	}
 
-	return 0;
+	return ret;
 }
 
-
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -250,13 +252,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto out;
+		goto err_scan;
 
 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto out;
+			goto err_scan;
 	}
 
 	/* pin shadow context by gvt even the shadow context will be pinned
@@ -270,31 +272,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
-		goto out;
+		goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		ret = PTR_ERR(rq);
-		goto out;
+		goto err_unpin;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
 
 	workload->req = i915_gem_request_get(rq);
-	copy_workload_to_ring_buffer(workload);
+	ret = copy_workload_to_ring_buffer(workload);
+	if (ret)
+		goto err_copy;
 
 	workload->shadowed = true;
+	return 0;
 
-out:
+err_copy:
+	i915_add_request(workload->req);
+	workload->req = NULL;
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
 	return ret;
 }
 
+static void
+intel_gvt_release_shadow_workload(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+
+	if (!workload->shadowed)
+		return;
+
+	/* free the dynamically allocated ring buffer each time to save memory */
+	if (workload->rb_len > RESERVE_RING_BUFFER_SIZE) {
+		struct intel_vgpu *vgpu = workload->vgpu;
+
+		free_pages((unsigned long)vgpu->dynamic_ring_buffer_va[ring_id],
+				get_order(workload->rb_len));
+		vgpu->dynamic_ring_buffer_va[ring_id] = NULL;
+	}
+
+	i915_add_request(workload->req);
+	workload->req = NULL;
+	engine->context_unpin(engine, shadow_ctx);
+	intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
+}
+
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	int ring_id = workload->ring_id;
@@ -308,15 +346,18 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
-		goto out;
+		goto err_scan;
 
 	if (workload->prepare) {
 		ret = workload->prepare(workload);
 		if (ret)
-			goto out;
+			goto err_shadow;
 	}
 
-out:
+err_shadow:
+	if (ret)
+		intel_gvt_release_shadow_workload(workload);
+err_scan:
 	if (ret)
 		workload->status = ret;
 
-- 
2.7.4
