[PATCH 2/6] drm/i915/gvt: Add error handling for shadow_indirect_ctx
fred gao
fred.gao at intel.com
Tue Aug 8 15:31:01 UTC 2017
When an error occurs after shadow_indirect_ctx succeeds, do the proper
cleanup and roll the shadowed indirect context back to its original
state before the workload is abandoned.
v2:
- split the mixed error paths apart for better review. (Zhenyu)
v3:
- no return value check for the cleanup functions. (Changbin)
CC: Changbin Du <changbin.du at intel.com>
Signed-off-by: fred gao <fred.gao at intel.com>
---
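Note for reviewers: with this change, the error-path structure of
intel_gvt_scan_and_shadow_workload() becomes roughly the following
(a simplified sketch, with declarations and unrelated logic elided;
see the scheduler.c hunks below for the literal change):

        ring = engine->context_pin(engine, shadow_ctx);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                gvt_vgpu_err("fail to pin shadow context\n");
                goto err_shadow;        /* roll back shadowed indirect ctx */
        }

        /* ... populate shadow context, allocate the gem request ... */

        workload->shadowed = true;
        return 0;

err_shadow:
        intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
out:
        return ret;
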
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 28 ++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/cmd_parser.h |  2 ++
 drivers/gpu/drm/i915/gvt/scheduler.c  |  9 ++++++---
 3 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 448ab02..084d6d3 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2720,6 +2720,26 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return ret;
 }
 
+/* clean up the resources allocated by shadow_indirect_ctx() */
+static void release_shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct drm_i915_gem_object *obj;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+					struct intel_vgpu_workload,
+					wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
+
+	obj = wa_ctx->indirect_ctx.obj;
+	if (!obj) {
+		gvt_vgpu_err("obj should not be NULL\n");
+		WARN_ON(1);
+		return;
+	}
+
+	i915_gem_object_unpin_map(obj);
+	i915_gem_object_put(obj);
+}
+
 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
@@ -2764,6 +2784,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
+void intel_gvt_release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	if (wa_ctx->indirect_ctx.size == 0)
+		return;
+
+	release_shadow_indirect_ctx(wa_ctx);
+}
+
 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
 		unsigned int opcode, int rings)
 {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 2867036..44262902 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -45,5 +45,7 @@ int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
 
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 
+void intel_gvt_release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 88ef48a..273a275 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -263,18 +263,18 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
-		goto out;
+		goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto out;
+		goto err_shadow;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		ret = PTR_ERR(rq);
-		goto out;
+		goto err_shadow;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@ -284,6 +284,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	workload->shadowed = true;
+	return 0;
 
+err_shadow:
+	intel_gvt_release_shadow_wa_ctx(&workload->wa_ctx);
 out:
 	return ret;
 }
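
Note: the resources rolled back here are the ones taken in
shadow_indirect_ctx(), which (roughly, simplified) does

        obj = i915_gem_object_create(...);
        map = i915_gem_object_pin_map(obj, I915_MAP_WB);
        ...
        wa_ctx->indirect_ctx.obj = obj;
        wa_ctx->indirect_ctx.shadow_va = map;

so release_shadow_indirect_ctx() pairs i915_gem_object_unpin_map()
with that pin_map, and i915_gem_object_put() with the object creation.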
--
2.7.4