[PATCH 1/2] drm/i915/gvt: Factor out audit and shadow from workload dispatch
Zhi Wang
zhi.a.wang at intel.com
Tue Jun 6 06:59:32 UTC 2017
Another idea I had earlier: the workload callbacks exist so the virtual
submission interface can deal with the workload during dispatch, and they
might be changed and refined later. The same goes for workload->prepare().
:P
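
For reference, a trimmed sketch of the hooks in question on struct
intel_vgpu_workload (fields abbreviated; prepare() is the one visible in the
patch below, while complete() is only an assumed example of a similar
callback, not something this patch touches):

/*
 * Sketch only: abbreviated view of the per-workload callbacks owned by the
 * virtual submission interface; not the full structure definition.
 */
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	int ring_id;
	struct drm_i915_gem_request *req;

	/* hooks that may be reshuffled when the dispatch flow is refined */
	int (*prepare)(struct intel_vgpu_workload *workload);
	int (*complete)(struct intel_vgpu_workload *workload);	/* assumed example */
};
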
On 06/06/17 14:38, Zhi Wang wrote:
> I suggest we keep dispatch_workload() as it's a mandatory step in the
> workload scheduler; maybe we will extend it in the future. Better not
> to remove it at this time.
>
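
To illustrate the point above: even after the audit/shadow step is factored
out, dispatch_workload() can stay as the single entry point the scheduler
always calls, today just a thin wrapper (a sketch only; locking, request
submission and error cleanup elided):

/* Sketch: dispatch_workload() kept as the mandatory scheduler step, even
 * though for now it only chains the factored-out helper and the optional
 * prepare() hook.
 */
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ret;

	ret = intel_gvt_audit_and_shadow_workload(workload);
	if (ret)
		return ret;

	if (workload->prepare)
		ret = workload->prepare(workload);

	return ret;
}
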
> On 06/06/17 13:54, Du, Changbin wrote:
>> On Wed, May 24, 2017 at 08:49:47PM +0800, Ping Gao wrote:
>>> The workload audit and shadow could execute at the earlier ELSP
>>> writing stage for performance reasons, so they need to be factored
>>> out of workload dispatch first.
>>>
>>> Signed-off-by: Ping Gao<ping.a.gao at intel.com>
>>> ---
>>> drivers/gpu/drm/i915/gvt/gvt.h | 2 ++
>>> drivers/gpu/drm/i915/gvt/scheduler.c | 44 ++++++++++++++++++++++++------------
>>> 2 files changed, 32 insertions(+), 14 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
>>> index 07ecd73..accd9c4 100644
>>> --- a/drivers/gpu/drm/i915/gvt/gvt.h
>>> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
>>> @@ -455,6 +455,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
>>> int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
>>> void populate_pvinfo_page(struct intel_vgpu *vgpu);
>>>
>>> +int intel_gvt_audit_and_shadow_workload(struct intel_vgpu_workload *workload);
>>> +
>>> struct intel_gvt_ops {
>>> int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
>>> unsigned int);
>>> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
>>> index e63b1d8..d6bfdfe 100644
>>> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
>>> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
>>> @@ -172,7 +172,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
>>> return NOTIFY_OK;
>>> }
>>>
>>> -static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> +
>>> +int intel_gvt_audit_and_shadow_workload(struct intel_vgpu_workload *workload)
>>> {
>>> int ring_id = workload->ring_id;
>>> struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>>> @@ -183,15 +184,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> struct intel_ring *ring;
>>> int ret;
>>>
>>> - gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>>> - ring_id, workload);
>>> -
>>> shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
>>> shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
>>> GEN8_CTX_ADDRESSING_MODE_SHIFT;
>>>
>>> - mutex_lock(&dev_priv->drm.struct_mutex);
>>> -
>>> /* pin shadow context by gvt even the shadow context will be pinned
>>> * when i915 alloc request. That is because gvt will update the guest
>>> * context from shadow context when workload is completed, and at that
>>> @@ -234,6 +230,29 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> if (ret)
>>> goto out;
>>>
>> The 'out' label immediately follows, so no goto is needed.
>>
>>> +out:
>>> + workload->status = ret;
>>> + return ret;
>>> +}
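
(Just to illustrate the comment above: the tail of
intel_gvt_audit_and_shadow_workload() can simply fall through; a sketch, not
the actual patch:)

	/* ... audit and shadow steps above have set 'ret' ... */

	/* 'out:' immediately follows the last check, so the goto can be
	 * dropped and the function simply records the status and returns.
	 */
	workload->status = ret;
	return ret;
}
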
>>> +
>>> +
>>> +static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> +{
>>> + int ring_id = workload->ring_id;
>>> + struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>>> + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
>>> + struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>>> + int ret = 0;
>>> +
>>> + gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>>> + ring_id, workload);
>>> +
>>> + mutex_lock(&dev_priv->drm.struct_mutex);
>>> +
>>> + ret = intel_gvt_audit_and_shadow_workload(workload);
>>> + if (ret)
>>> + goto out;
>>> +
>>> if (workload->prepare) {
>>> ret = workload->prepare(workload);
>>> if (ret)
>>> @@ -243,16 +262,13 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
>>> ring_id, workload->req);
>>>
>>> - ret = 0;
>>> - workload->dispatched = true;
>>> out:
>>> - if (ret)
>>> - workload->status = ret;
>>> -
>>> - if (!IS_ERR_OR_NULL(rq))
>>> - i915_add_request(rq);
>>> - else
>>> + if (ret) {
>>> engine->context_unpin(engine, shadow_ctx);
>>> + } else {
>>> + i915_add_request(workload->req);
>>> + workload->dispatched = true;
>>> + }
>>>
>> I think you can handle shadow failure inside your
>> intel_gvt_audit_and_shadow_workload(), so the API has no side effects
>> if it returns failure.
>>
>> Then dispatch_workload() only has a single call left to workload->prepare(),
>> and the dispatch_workload() function turns out to be unnecessary.
>>
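
(A minimal sketch of that suggestion: undo the context pin inside the helper
itself when it fails, so a failing intel_gvt_audit_and_shadow_workload()
leaves no side effects for the caller to clean up; illustrative only, the
audit/shadow steps are elided:)

int intel_gvt_audit_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	int ret = 0;

	/* ... pin shadow context, audit the ring buffer, populate the
	 * shadow context; each step sets 'ret' on failure (elided here) ...
	 */

	if (ret)
		/* clean up our own pin so the caller sees no side effects */
		engine->context_unpin(engine, shadow_ctx);

	workload->status = ret;
	return ret;
}
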
>>> mutex_unlock(&dev_priv->drm.struct_mutex);
>>> return ret;
>>> --
>>> 2.7.4