<html>
  <head>
    <meta content="text/html; charset=UTF-8" http-equiv="Content-Type">
  </head>
  <body text="#000000" bgcolor="#FFFFFF">
    I suggest we keep dispatch_workload() as it's a mandatory
    step in the workload scheduler; we may extend it in the future.
    Better not to remove it at this time.<br>
    <br>
    <div class="moz-cite-prefix">On 06/06/17 13:54, Du, Changbin wrote:<br>
    </div>
    <blockquote cite="mid:20170606055451.GA17529@intel.com" type="cite">
      <pre wrap="">On Wed, May 24, 2017 at 08:49:47PM +0800, Ping Gao wrote:
</pre>
      <blockquote type="cite">
        <pre wrap="">The workload audit and shadow could execute in earlier ELSP
writing stage for performance consideration, it's need to factor
out it from workload dispatch first.

Signed-off-by: Ping Gao <a class="moz-txt-link-rfc2396E" href="mailto:ping.a.gao@intel.com"><ping.a.gao@intel.com></a>
---
 drivers/gpu/drm/i915/gvt/gvt.h       |  2 ++
 drivers/gpu/drm/i915/gvt/scheduler.c | 44 ++++++++++++++++++++++++------------
 2 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 07ecd73..accd9c4 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -455,6 +455,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
+int intel_gvt_audit_and_shadow_workload(struct intel_vgpu_workload *workload);
+
 struct intel_gvt_ops {
        int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
                                unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e63b1d8..d6bfdfe 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -172,7 +172,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+
+int intel_gvt_audit_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
@@ -183,15 +184,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        struct intel_ring *ring;
        int ret;
 
-       gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
-               ring_id, workload);
-
        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-
        /* pin shadow context by gvt even the shadow context will be pinned
         * when i915 alloc request. That is because gvt will update the guest
         * context from shadow context when workload is completed, and at that
@@ -234,6 +230,29 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        if (ret)
                goto out;

</pre>
      </blockquote>
      <pre wrap="">The 'out' label immediately follows, so no goto is needed.

</pre>
      <blockquote type="cite">
        <pre wrap="">+out:
+       workload->status = ret;
+       return ret;
+}
+
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+       int ring_id = workload->ring_id;
+       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+       int ret = 0;
+
+       gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+               ring_id, workload);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+
+       ret = intel_gvt_audit_and_shadow_workload(workload);
+       if (ret)
+               goto out;
+
        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
@@ -243,16 +262,13 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                        ring_id, workload->req);
 
-       ret = 0;
-       workload->dispatched = true;
 out:
-       if (ret)
-               workload->status = ret;
-
-       if (!IS_ERR_OR_NULL(rq))
-               i915_add_request(rq);
-       else
+       if (ret) {
                engine->context_unpin(engine, shadow_ctx);
+       } else {
+               i915_add_request(workload->req);
+               workload->dispatched = true;
+       }
 
</pre>
      </blockquote>
      <pre wrap="">I think you can handle shadow failure inside your intel_gvt_audit_and_shadow_workload.
So the API doesn't have side effects if it returns failure.

And then dispatch_workload() only has a single call to workload->prepare.
The dispatch_workload function then turns out not to be necessary.

</pre>
      <blockquote type="cite">
        <pre wrap="">   mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
-- 
2.7.4

_______________________________________________
intel-gvt-dev mailing list
<a class="moz-txt-link-abbreviated" href="mailto:intel-gvt-dev@lists.freedesktop.org">intel-gvt-dev@lists.freedesktop.org</a>
<a class="moz-txt-link-freetext" href="https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev">https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev</a>
</pre>
      </blockquote>
      <pre wrap="">
</pre>
      <br>
      <fieldset class="mimeAttachmentHeader"></fieldset>
      <br>
      <pre wrap="">_______________________________________________
intel-gvt-dev mailing list
<a class="moz-txt-link-abbreviated" href="mailto:intel-gvt-dev@lists.freedesktop.org">intel-gvt-dev@lists.freedesktop.org</a>
<a class="moz-txt-link-freetext" href="https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev">https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev</a>
</pre>
    </blockquote>
    <br>
  </body>
</html>