[PATCH 3/6] drm/i915/gvt: Refine error handling for prepare_execlist_workload
fred gao
fred.gao@intel.com
Tue Aug 8 15:31:02 UTC 2017
Refine the error handling for prepare_execlist_workload to restore
the original state once an error occurs.

Only release the shadowed batch buffer and wa ctx in
complete_execlist_workload when a request has been allocated.

v2:
- Split the several mixed error paths for better review. (Zhenyu)

Signed-off-by: fred gao <fred.gao@intel.com>
---
drivers/gpu/drm/i915/gvt/execlist.c | 79 ++++++++++++++++++++++++++-----------
1 file changed, 57 insertions(+), 22 deletions(-)
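
Review note: the new prepare_execlist_workload below follows the common
kernel goto-unwind pattern, where each failure jumps to a label that undoes
exactly the steps completed so far, in reverse order. Here is a minimal
compilable sketch of that idiom with illustrative stand-in names (not GVT
code):

#include <stdio.h>

struct workload { int pinned; };

/* Hypothetical stand-ins so the sketch builds on its own. */
static int pin_mm(struct workload *w)      { w->pinned = 1; return 0; }
static void unpin_mm(struct workload *w)   { w->pinned = 0; }
static int schedule_in(struct workload *w) { (void)w; return -1; /* force a failure */ }

static int prepare_sketch(struct workload *w)
{
        int ret;

        ret = pin_mm(w);
        if (ret)
                goto err_ret;           /* nothing succeeded yet, nothing to undo */

        ret = schedule_in(w);
        if (ret)
                goto err_unpin_mm;      /* undo pin_mm() only, in reverse order */

        return 0;

err_unpin_mm:
        unpin_mm(w);
err_ret:
        return ret;
}

int main(void)
{
        struct workload w = { 0 };

        /* schedule_in() fails, so the unwind drops the pin again. */
        printf("prepare_sketch: %d, pinned after unwind: %d\n",
               prepare_sketch(&w), w.pinned);
        return 0;
}

The actual patch applies the same ordering to intel_vgpu_pin_mm,
the shadow buffers, and emulate_execlist_schedule_in.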
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4369686..ed7f8ca 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
         }
 }
+
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
         struct intel_vgpu_workload *workload = container_of(wa_ctx,
@@ -447,26 +448,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
         update_wa_ctx_2_shadow_ctx(wa_ctx);
 }
 
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
-        struct intel_vgpu *vgpu = workload->vgpu;
-        struct execlist_ctx_descriptor_format ctx[2];
-        int ring_id = workload->ring_id;
-
-        intel_vgpu_pin_mm(workload->shadow_mm);
-        intel_vgpu_sync_oos_pages(workload->vgpu);
-        intel_vgpu_flush_post_shadow(workload->vgpu);
-        prepare_shadow_batch_buffer(workload);
-        prepare_shadow_wa_ctx(&workload->wa_ctx);
-        if (!workload->emulate_schedule_in)
-                return 0;
-
-        ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-        ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
-        return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
-}
-
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
         /* release all the shadow batch buffer */
@@ -496,6 +477,58 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
         i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+        struct intel_vgpu *vgpu = workload->vgpu;
+        struct execlist_ctx_descriptor_format ctx[2];
+        int ring_id = workload->ring_id;
+        int ret;
+
+        ret = intel_vgpu_pin_mm(workload->shadow_mm);
+        if (ret) {
+                gvt_vgpu_err("fail to vgpu pin mm\n");
+                goto err_ret;
+        }
+
+        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+        if (ret) {
+                gvt_vgpu_err("fail to vgpu sync oos pages\n");
+                goto err_unpin_mm;
+        }
+
+        ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+        if (ret) {
+                gvt_vgpu_err("fail to flush post shadow\n");
+                goto err_unpin_mm;
+        }
+
+        /* ggtt_pin may fail, so ignore the pin error */
+        prepare_shadow_batch_buffer(workload);
+        prepare_shadow_wa_ctx(&workload->wa_ctx);
+
+        if (!workload->emulate_schedule_in)
+                return 0;
+
+        ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+        ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+        ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+        if (ret) {
+                gvt_vgpu_err("fail to emulate execlist schedule in\n");
+                goto err_unpin_wa;
+        }
+        return 0;
+
+err_unpin_wa:
+        release_shadow_wa_ctx(&workload->wa_ctx);
+        release_shadow_batch_buffer(workload);
+err_unpin_mm:
+        intel_vgpu_unpin_mm(workload->shadow_mm);
+
+err_ret:
+        return ret;
+}
+
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
         struct intel_vgpu *vgpu = workload->vgpu;
@@ -509,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
         gvt_dbg_el("complete workload %p status %d\n", workload,
                         workload->status);
 
-        release_shadow_batch_buffer(workload);
-        release_shadow_wa_ctx(&workload->wa_ctx);
+        if (workload->req) {
+                release_shadow_batch_buffer(workload);
+                release_shadow_wa_ctx(&workload->wa_ctx);
+        }
 
         if (workload->status || vgpu->resetting)
                 goto out;
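
The last hunk deliberately skips the release calls when no request was
allocated: in that case prepare_execlist_workload already unwound the shadow
buffers on its error path, and releasing them again here would be a double
free. A tiny illustration of that guard, again with hypothetical stand-in
names rather than GVT code:

#include <stdio.h>

struct workload_s { void *req; int shadow_held; };

static void release_shadow(struct workload_s *w) { w->shadow_held = 0; }

static void complete_sketch(struct workload_s *w)
{
        /*
         * Only tear down shadow state if a request was allocated;
         * otherwise the prepare error path has already done it.
         */
        if (w->req)
                release_shadow(w);
}

int main(void)
{
        struct workload_s failed = { NULL, 0 };  /* prepare failed early */
        struct workload_s done = { &done, 1 };   /* request was allocated */

        complete_sketch(&failed);
        complete_sketch(&done);
        printf("failed: %d, done: %d\n", failed.shadow_held, done.shadow_held);
        return 0;
}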
--
2.7.4