[PATCH v2 1/2] drm/i915/gvt: Factor out scan and shadow from workload dispatch
Gao, Ping A
ping.a.gao at intel.com
Thu Jun 8 07:05:00 UTC 2017
On 2017/6/8 14:46, Dong, Chuanxiao wrote:
>> -----Original Message-----
>> From: intel-gvt-dev [mailto:intel-gvt-dev-bounces at lists.freedesktop.org] On
>> Behalf Of Ping Gao
>> Sent: Thursday, June 8, 2017 10:31 AM
>> To: intel-gvt-dev at lists.freedesktop.org
>> Cc: Gao, Ping A <ping.a.gao at intel.com>
>> Subject: [PATCH v2 1/2] drm/i915/gvt: Factor out scan and shadow from
>> workload dispatch
>>
>> To allow the workload scan and shadow to be performed at ELSP writing
>> time for better performance, factor the scan and shadow logic out of
>> dispatch_workload().
>>
>> v2: Put context pin before i915_add_request;
>> Refine the comments;
>> Rename some APIs;
>>
>> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
>> ---
>> drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 +-
>> drivers/gpu/drm/i915/gvt/cmd_parser.h | 2 +-
>> drivers/gpu/drm/i915/gvt/gvt.h | 2 +
>> drivers/gpu/drm/i915/gvt/scheduler.c | 82 ++++++++++++++++++++---------------
>> 4 files changed, 52 insertions(+), 36 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
>> index 5634eb1..e68fda2 100644
>> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
>> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
>> @@ -2643,7 +2643,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
>> return 0;
>> }
>>
>> -int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>> +int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
>> {
>> int ret;
>> struct intel_vgpu *vgpu = workload->vgpu;
>> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
>> index bed3351..2867036 100644
>> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
>> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
>> @@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
>>
>> int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
>>
>> -int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
>> +int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
>>
>> int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
>>
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
>> index 930732e..ae6e7a4 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.h
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
>> @@ -450,6 +450,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
>> int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
>> void populate_pvinfo_page(struct intel_vgpu *vgpu);
>>
>> +int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
>> +
>> struct intel_gvt_ops {
>> int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
>> unsigned int);
>> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
>> index 6ae286c..3a78e7e 100644
>> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
>> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
>> @@ -172,42 +172,27 @@ static int shadow_context_status_change(struct notifier_block *nb,
>> return NOTIFY_OK;
>> }
>>
>> -static int dispatch_workload(struct intel_vgpu_workload *workload)
>> +/**
>> + * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it
>> + * and creating its shadow copy, including the ring buffer, wa_ctx and ctx.
>> + * @workload: an abstract entity for each execlist submission.
>> + *
>> + * This function is called before the workload is submitted to i915, to make
>> + * sure the content of the workload is valid.
>> + */
>> +int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>> {
>> int ring_id = workload->ring_id;
>> struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>> struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
>> - struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>> struct drm_i915_gem_request *rq;
>> struct intel_vgpu *vgpu = workload->vgpu;
>> - struct intel_ring *ring;
>> int ret;
>>
>> - gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>> - ring_id, workload);
>> -
>> shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
>> shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
>> GEN8_CTX_ADDRESSING_MODE_SHIFT;
>>
>> - mutex_lock(&dev_priv->drm.struct_mutex);
>> -
>> - /* pin shadow context by gvt even the shadow context will be pinned
>> - * when i915 alloc request. That is because gvt will update the guest
>> - * context from shadow context when workload is completed, and at that
>> - * moment, i915 may already unpined the shadow context to make the
>> - * shadow_ctx pages invalid. So gvt need to pin itself. After update
>> - * the guest context, gvt can unpin the shadow_ctx safely.
>> - */
>> - ring = engine->context_pin(engine, shadow_ctx);
>> - if (IS_ERR(ring)) {
>> - ret = PTR_ERR(ring);
>> - gvt_vgpu_err("fail to pin shadow context\n");
>> - workload->status = ret;
>> - mutex_unlock(&dev_priv->drm.struct_mutex);
>> - return ret;
>> - }
>> -
>> rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
>> if (IS_ERR(rq)) {
>> gvt_vgpu_err("fail to allocate gem request\n");
>> @@ -219,7 +204,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>
>> workload->req = i915_gem_request_get(rq);
>>
>> - ret = intel_gvt_scan_and_shadow_workload(workload);
>> + ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
>> if (ret)
>> goto out;
>>
>> @@ -231,6 +216,27 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>> }
>>
>> ret = populate_shadow_context(workload);
>> +
>> +out:
>> + return ret;
>> +}
>> +
>> +static int dispatch_workload(struct intel_vgpu_workload *workload)
>> +{
>> + int ring_id = workload->ring_id;
>> + struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>> + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
>> + struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>> + struct intel_vgpu *vgpu = workload->vgpu;
>> + struct intel_ring *ring;
>> + int ret = 0;
>> +
>> + gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>> + ring_id, workload);
>> +
>> + mutex_lock(&dev_priv->drm.struct_mutex);
>> +
>> + ret = intel_gvt_scan_and_shadow_workload(workload);
>> if (ret)
>> goto out;
>>
>> @@ -240,20 +246,28 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>> goto out;
>> }
>>
>> + /* pin shadow context by gvt even the shadow context will be pinned
>> + * when i915 alloc request. That is because gvt will update the guest
>> + * context from shadow context when workload is completed, and at that
>> + * moment, i915 may already unpined the shadow context to make the
>> + * shadow_ctx pages invalid. So gvt need to pin itself. After update
>> + * the guest context, gvt can unpin the shadow_ctx safely.
>> + */
>> + ring = engine->context_pin(engine, shadow_ctx);
>> + if (IS_ERR(ring)) {
>> + ret = PTR_ERR(ring);
>> + gvt_vgpu_err("fail to pin shadow context\n");
>> + goto out;
>> + }
>> +
>> gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
>> ring_id, workload->req);
>>
>> - ret = 0;
>> + i915_add_request(workload->req);
>> workload->dispatched = true;
>> -out:
>> - if (ret)
>> - workload->status = ret;
>> -
>> - if (!IS_ERR_OR_NULL(rq))
>> - i915_add_request(rq);
>> - else
>> - engine->context_unpin(engine, shadow_ctx);
>>
>> +out:
>> + workload->status = ret;
> Hi Ping,
>
> If no error happens in the dispatching sequence above, workload->status will be set to 0, right? In that case, the part of shadow_context_status_change() quoted below looks useless, since workload->status will never be -EINPROGRESS: during dispatching it is set to either an error code or 0.
>
> case INTEL_CONTEXT_SCHEDULE_OUT:
> /* If the status is -EINPROGRESS means this workload
> * doesn't meet any issue during dispatching so when
> * get the SCHEDULE_OUT set the status to be zero for
> * good. If the status is NOT -EINPROGRESS means there
> * is something wrong happened during dispatching and
> * the status should not be set to zero
> */
> if (workload->status == -EINPROGRESS)
> workload->status = 0;
>
> Thanks
> Chuanxiao
Good catch, thanks Chuanxiao.
workload->status should only be set when an error happens; I'll change it
in the next version:

	if (ret)
		workload->status = ret;
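
To make the intent concrete, the tail of dispatch_workload() would then
look roughly like this (a sketch of the planned change, not the final
patch):

	out:
		/* Record the status only on failure; a successful dispatch
		 * leaves workload->status at -EINPROGRESS, so that
		 * shadow_context_status_change() can still clear it to zero
		 * on INTEL_CONTEXT_SCHEDULE_OUT.
		 */
		if (ret)
			workload->status = ret;

		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}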
>> mutex_unlock(&dev_priv->drm.struct_mutex);
>> return ret;
>> }
>> --
>> 2.7.4
>>
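
For reference, after this refactor (with the status fix above folded in)
the dispatch path reduces to roughly the following. This is a simplified
sketch of the code in the patch, with intermediate steps and some error
handling omitted:

	static int dispatch_workload(struct intel_vgpu_workload *workload)
	{
		struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
		struct intel_ring *ring;
		int ret = 0;

		mutex_lock(&dev_priv->drm.struct_mutex);

		/* audit and shadow the ring buffer, wa_ctx and context; patch
		 * 2/2 can then invoke this step at ELSP writing time instead */
		ret = intel_gvt_scan_and_shadow_workload(workload);
		if (ret)
			goto out;

		/* pin the shadow context ourselves so it stays valid until gvt
		 * has copied it back to the guest context on completion */
		ring = engine->context_pin(engine, workload->vgpu->shadow_ctx);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto out;
		}

		i915_add_request(workload->req);
		workload->dispatched = true;
	out:
		if (ret)
			workload->status = ret;
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}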