[PATCH] drm/i915/gvt: Move request alloc to dispatch_workload path only

Zhenyu Wang zhenyuw at linux.intel.com
Tue Nov 14 09:09:35 UTC 2017


From: fred gao <fred.gao at intel.com>

Previously, performance was improved by auditing and shadowing the
workload ahead of vGPU scheduling. However, if more requests are
allocated in submit_context before the previous request is added, the
timeline will already hold a later seqno.

This patch moves the request allocation into the dispatch_workload
path, the same place where the request is added.

This fixes the kernel BUG triggered by the
(timeline->seqno != request->fence.seqno) check in add_request.
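
A simplified sketch of the ordering problem (helper names abbreviated,
not the exact i915 call chain):

    scan_and_shadow(w1)   /* alloc rq1, seqno reserved from the timeline */
    scan_and_shadow(w2)   /* alloc rq2, timeline seqno moves past rq1 */
    dispatch(w1)          /* add rq1: timeline->seqno != rq1->fence.seqno */

With the request allocated in the dispatch path, right before it is
added, the check always sees a matching timeline seqno:

    scan_and_shadow(w1)   /* scan and shadow only, no request yet */
    dispatch(w1)          /* alloc rq1, copy ring buffer, add rq1 */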

Fixes: 89ea20b930cb ("drm/i915/gvt: Factor out scan and shadow from workload dispatch")
Signed-off-by: Chuanxiao Dong <chuanxiao.dong at intel.com>
Signed-off-by: fred gao <fred.gao at intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw at linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 77ca557475cf..d6177a0baeec 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -271,7 +271,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_ring *ring;
 	int ret;
 
@@ -316,6 +315,27 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -330,14 +350,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
 
@@ -497,6 +514,12 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
-- 
2.15.0.rc2


