[PATCH v3 1/2] drm/i915/gvt: Factor out scan and shadow from workload dispatch

Zhi Wang zhi.a.wang at intel.com
Thu Jun 15 05:30:19 UTC 2017


On 06/15/17 11:02, Gao, Ping A wrote:
> On 2017/6/15 10:03, Wang, Zhi A wrote:
>> On 06/09/17 13:52, Ping Gao wrote:
>>> To perform the workload scan and shadow in the ELSP writing stage
>>> for performance reasons, the workload scan and shadow steps should
>>> be factored out of dispatch_workload().
>>>
>>> v2: Put context pin before i915_add_request;
>>>      Refine the comments;
>>>      Rename some APIs;
>>>
>>> v3: workload->status should be set only when an error happens.
>>>
>>> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
>>> ---
>>>    drivers/gpu/drm/i915/gvt/cmd_parser.c |  2 +-
>>>    drivers/gpu/drm/i915/gvt/cmd_parser.h |  2 +-
>>>    drivers/gpu/drm/i915/gvt/gvt.h        |  2 +
>>>    drivers/gpu/drm/i915/gvt/scheduler.c  | 79 +++++++++++++++++++++--------------
>>>    4 files changed, 51 insertions(+), 34 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> index 5634eb1..e68fda2 100644
>>> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> @@ -2643,7 +2643,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
>>>    	return 0;
>>>    }
>>>    
>>> -int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>>> +int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
>>>    {
>>>    	int ret;
>>>    	struct intel_vgpu *vgpu = workload->vgpu;
>>> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
>>> index bed3351..2867036 100644
>>> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
>>> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
>>> @@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
>>>    
>>>    int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
>>>    
>>> -int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
>>> +int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
>>>    
>>>    int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
>>>    
>>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
>>> index 930732e..ae6e7a4 100644
>>> --- a/drivers/gpu/drm/i915/gvt/gvt.h
>>> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
>>> @@ -450,6 +450,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
>>>    int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
>>>    void populate_pvinfo_page(struct intel_vgpu *vgpu);
>>>    
>>> +int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
>>> +
>>>    struct intel_gvt_ops {
>>>    	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
>>>    				unsigned int);
>>> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
>>> index 6ae286c..e986d86 100644
>>> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
>>> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
>>> @@ -172,42 +172,27 @@ static int shadow_context_status_change(struct notifier_block *nb,
>>>    	return NOTIFY_OK;
>>>    }
>>>    
>>> -static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> +/**
>>> + * intel_gvt_scan_and_shadow_workload - audit the workload by scanning
>>> + * and shadowing it, including the ring buffer, wa_ctx and context.
>>> + * @workload: an abstract entity for each execlist submission.
>>> + *
>>> + * This function is called before the workload is submitted to i915, to
>>> + * make sure the content of the workload is valid.
>>> + */
>>> +int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>>>    {
>>>    	int ring_id = workload->ring_id;
>>>    	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>>>    	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
>>> -	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>>>    	struct drm_i915_gem_request *rq;
>>>    	struct intel_vgpu *vgpu = workload->vgpu;
>>> -	struct intel_ring *ring;
>>>    	int ret;
>>>    
>>> -	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>>> -		ring_id, workload);
>>> -
>>>    	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
>>>    	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
>>>    				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
>>>    
>>> -	mutex_lock(&dev_priv->drm.struct_mutex);
>>> -
>>> -	/* pin shadow context by gvt even the shadow context will be pinned
>>> -	 * when i915 alloc request. That is because gvt will update the guest
>>> -	 * context from shadow context when workload is completed, and at that
>>> -	 * moment, i915 may already unpined the shadow context to make the
>>> -	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
>>> -	 * the guest context, gvt can unpin the shadow_ctx safely.
>>> -	 */
>>> -	ring = engine->context_pin(engine, shadow_ctx);
>>> -	if (IS_ERR(ring)) {
>>> -		ret = PTR_ERR(ring);
>>> -		gvt_vgpu_err("fail to pin shadow context\n");
>>> -		workload->status = ret;
>>> -		mutex_unlock(&dev_priv->drm.struct_mutex);
>>> -		return ret;
>>> -	}
>>> -
>>>    	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
>>>    	if (IS_ERR(rq)) {
>>>    		gvt_vgpu_err("fail to allocate gem request\n");
>>> @@ -219,7 +204,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>>    
>>>    	workload->req = i915_gem_request_get(rq);
>>>    
>>> -	ret = intel_gvt_scan_and_shadow_workload(workload);
>>> +	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
>>>    	if (ret)
>>>    		goto out;
>>>    
>>> @@ -231,6 +216,27 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>>    	}
>>>    
>>>    	ret = populate_shadow_context(workload);
>>> +
>>> +out:
>>> +	return ret;
>>> +}
>>> +
>>> +static int dispatch_workload(struct intel_vgpu_workload *workload)
>>> +{
>>> +	int ring_id = workload->ring_id;
>>> +	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>>> +	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
>>> +	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>>> +	struct intel_vgpu *vgpu = workload->vgpu;
>>> +	struct intel_ring *ring;
>>> +	int ret = 0;
>>> +
>>> +	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>>> +		ring_id, workload);
>>> +
>>> +	mutex_lock(&dev_priv->drm.struct_mutex);
>>> +
>>> +	ret = intel_gvt_scan_and_shadow_workload(workload);
>>>    	if (ret)
>>>    		goto out;
>>>    
>>> @@ -240,20 +246,29 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>>>    			goto out;
>>>    	}
>>>    
>> Is there any reason why you moved context_pin to this place?
> No other errors can happen after context_pin, which means there is no
> need to do unpin_context in those error handlers. I just want to
> simplify the logic.
Will you touch the ring buffer related to the shadow context during the 
workload scan? If so, it is better to pin the context earlier, since 
context_pin() will pin the ring buffer as well. :)
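
For illustration, a minimal sketch of that ordering (pin first, then
scan); the error-path unpin is my own suggestion and not part of this
patch, though it only uses helpers already present in the series:

	/* Pin first so the shadow ring buffer pages stay valid while
	 * the command parser scans and copies them.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		gvt_vgpu_err("fail to pin shadow context\n");
		return PTR_ERR(ring);
	}

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret) {
		/* the scan failed; drop the pin taken above */
		engine->context_unpin(engine, shadow_ctx);
		return ret;
	}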
>>> +	/* Pin the shadow context by GVT even though it will also be pinned
>>> +	 * when i915 allocates a request. That is because GVT will update the
>>> +	 * guest context from the shadow context when the workload completes,
>>> +	 * and at that moment i915 may have already unpinned the shadow
>>> +	 * context, making the shadow_ctx pages invalid. So GVT needs its own
>>> +	 * pin; after updating the guest context, GVT can unpin shadow_ctx safely.
>>> +	 */
>>> +	ring = engine->context_pin(engine, shadow_ctx);
>>> +	if (IS_ERR(ring)) {
>>> +		ret = PTR_ERR(ring);
>>> +		gvt_vgpu_err("fail to pin shadow context\n");
>>> +		goto out;
>>> +	}
>>> +
>>>    	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
>>>    			ring_id, workload->req);
>>>    
>>> -	ret = 0;
>>> +	i915_add_request(workload->req);
>>>    	workload->dispatched = true;
>>> +
>>>    out:
>>>    	if (ret)
>>>    		workload->status = ret;
>>> -
>>> -	if (!IS_ERR_OR_NULL(rq))
>>> -		i915_add_request(rq);
>>> -	else
>>> -		engine->context_unpin(engine, shadow_ctx);
>>> -
>>>    	mutex_unlock(&dev_priv->drm.struct_mutex);
>>>    	return ret;
>>>    }
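
With this split, the ELSP write handler in patch 2/2 can presumably run
the audit at submission time and leave dispatch_workload() with only the
pin and i915_add_request() steps. A hypothetical caller (the function
name is illustrative, not taken from this series) might look like:

	/* Run the scan/shadow audit when the guest writes the ELSP, under
	 * the same struct_mutex this patch takes around the call.
	 */
	static int audit_workload_on_elsp_write(struct intel_vgpu_workload *workload)
	{
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		int ret;

		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}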


