[PATCH v6 3/3] drm/i915/gvt: Add error handling for submit_context

fred gao fred.gao at intel.com
Wed Sep 6 02:12:52 UTC 2017


When a scan error occurs in submit_context, decrease the mm
reference count and free the workload struct before the
workload is abandoned.

v2:
- submit_context related code should be combined. (Zhenyu)

v3:
- free all the unsubmitted workloads. (Zhenyu)

Signed-off-by: fred gao <fred.gao at intel.com>
---
 drivers/gpu/drm/i915/gvt/execlist.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 5ec07ec..979a9a0 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -358,9 +358,10 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 	return 0;
 }
 
-static void free_workload(struct intel_vgpu_workload *workload)
+static void free_workload(struct intel_vgpu_workload *workload, bool unpined)
 {
-	intel_vgpu_unpin_mm(workload->shadow_mm);
+	if (unpined)
+		intel_vgpu_unpin_mm(workload->shadow_mm);
 	intel_gvt_mm_unreference(workload->shadow_mm);
 	kmem_cache_free(workload->vgpu->workloads, workload);
 }
@@ -578,7 +579,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 
 	if (lite_restore) {
 		gvt_dbg_el("next context == current - no schedule-out\n");
-		free_workload(workload);
+		free_workload(workload, true);
 		return 0;
 	}
 
@@ -586,10 +587,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	if (ret)
 		goto err;
 out:
-	free_workload(workload);
+	free_workload(workload, true);
 	return 0;
 err:
-	free_workload(workload);
+	free_workload(workload, true);
 	return ret;
 }
 
@@ -759,13 +760,21 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	if (list_empty(workload_q_head(vgpu, ring_id))) {
 		intel_runtime_pm_get(dev_priv);
 		mutex_lock(&dev_priv->drm.struct_mutex);
-		intel_gvt_scan_and_shadow_workload(workload);
+		ret = intel_gvt_scan_and_shadow_workload(workload);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 		intel_runtime_pm_put(dev_priv);
 	}
 
-	queue_workload(workload);
-	return 0;
+	if (ret == 0)
+		queue_workload(workload);
+	else {
+		free_workload(workload, false);
+		if (vgpu_is_vm_unhealthy(ret)) {
+			intel_vgpu_clean_execlist(vgpu);
+			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+		}
+	}
+	return ret;
 }
 
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
@@ -844,7 +853,7 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 		list_for_each_entry_safe(pos, n,
 			&vgpu->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);
-			free_workload(pos);
+			free_workload(pos, false);
 		}
 
 		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
-- 
2.7.4



More information about the intel-gvt-dev mailing list