[PATCH 2/6] drm/i915/gvt: Add error handling for shadow_indirect_ctx

fred gao fred.gao at intel.com
Mon Aug 7 04:13:19 UTC 2017


When an error occurs after shadow_indirect_ctx succeeds, do the proper
cleanup and roll back the shadowed indirect context to its original
state before the workload is abandoned.

v2:
- split the mixed error paths into separate patches for easier review. (Zhenyu)

Signed-off-by: fred gao <fred.gao at intel.com>
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 31 +++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/cmd_parser.h |  1 +
 drivers/gpu/drm/i915/gvt/scheduler.c  |  9 ++++++---
 3 files changed, 38 insertions(+), 3 deletions(-)
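
[ Reviewer note, not part of the commit message: the scheduler.c hunk
follows the usual kernel goto-unwind idiom, where the cleanup label sits
after the success path and each error site jumps to the label that
releases exactly what has been set up so far. A minimal standalone
sketch of the idiom is below; shadow_setup()/shadow_release()/
later_step() are hypothetical stand-ins, not GVT-g code. ]

  #include <stdio.h>

  /* hypothetical stand-ins for the real setup/teardown steps */
  static int shadow_setup(void) { puts("setup"); return 0; }
  static void shadow_release(void) { puts("release"); }
  static int later_step(int fail) { return fail ? -1 : 0; }

  static int do_work(int fail)
  {
  	int ret;

  	ret = shadow_setup();
  	if (ret)
  		goto out;		/* nothing to unwind yet */

  	ret = later_step(fail);
  	if (ret)
  		goto err_shadow;	/* roll back the setup */

  	/* success: jump over the cleanup label */
  	goto out;

  err_shadow:
  	shadow_release();
  out:
  	return ret;
  }

  int main(void)
  {
  	do_work(0);	/* prints "setup" */
  	do_work(1);	/* prints "setup" then "release" */
  	return 0;
  }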

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index fd8a6ba..03c82df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2737,6 +2737,29 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return ret;
 }
 
+/* clean up of shadow_indirect_ctx */
+static int release_shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct drm_i915_gem_object *obj;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ret = 0;
+
+	obj = wa_ctx->indirect_ctx.obj;
+	if (!obj) {
+		gvt_vgpu_err("obj should not be NULL\n");
+		WARN_ON(1);
+		ret = -EINVAL;
+	} else {
+		i915_gem_object_unpin_map(obj);
+		i915_gem_object_put(obj);
+	}
+
+	return ret;
+}
+
 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
@@ -2781,6 +2804,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
+int intel_gvt_release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	if (wa_ctx->indirect_ctx.size == 0)
+		return 0;
+
+	return release_shadow_indirect_ctx(wa_ctx);
+}
+
 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
 		unsigned int opcode, int rings)
 {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 2867036..b1a1d82 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -45,5 +45,6 @@ int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
 
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+int intel_gvt_release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6d05ad4..279bd9b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -270,18 +270,18 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
-		goto out;
+		goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto out;
+		goto err_shadow;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		ret = PTR_ERR(rq);
-		goto out;
+		goto err_shadow;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@ -291,6 +291,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	workload->shadowed = true;
+	goto out;
 
+err_shadow:
+	intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
 out:
 	return ret;
 }
-- 
2.7.4


