[PATCH] drm/i915/gvt: Introduce per object locking in GVT scheduler.

Zhenyu Wang zhenyuw at linux.intel.com
Mon Jan 18 05:17:57 UTC 2021


On 2021.01.10 23:43:05 +0200, Zhi Wang wrote:
> To support the ww locking and per-object locking implemented in i915, the GVT
> scheduler needs to be refined. Most of the changes are located in the shadow batch
> buffer and shadow wa context in GVT-g, which use quite a lot of i915 gem object APIs.
> 
> v2:
> 
> - Adjust the usage of ww lock on context pin/unpin. (maarten)
> - Rebase the patch on the newest staging branch.
> 
> Cc: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> Cc: Zhenyu Wang <zhenyuw at linux.intel.com>
> Signed-off-by: Zhi Wang <zhi.a.wang at intel.com>
> ---

Looks fine to me.

Reviewed-by: Zhenyu Wang <zhenyuw at linux.intel.com>

>  drivers/gpu/drm/i915/gvt/scheduler.c | 49 +++++++++++++++++++++++++++++-------
>  1 file changed, 40 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 43f31c2..4dfa418 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
>  	if (!wa_ctx->indirect_ctx.obj)
>  		return;
>  
> +	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
>  	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
> +	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
>  	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
>  
>  	wa_ctx->indirect_ctx.obj = NULL;
> @@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
>  	struct intel_gvt *gvt = workload->vgpu->gvt;
>  	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
>  	struct intel_vgpu_shadow_bb *bb;
> +	struct i915_gem_ww_ctx ww;
>  	int ret;
>  
>  	list_for_each_entry(bb, &workload->shadow_bb, list) {
> @@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
>  		 * directly
>  		 */
>  		if (!bb->ppgtt) {
> -			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
> -							   NULL, 0, 0, 0);
> +			i915_gem_ww_ctx_init(&ww, false);
> +retry:
> +			i915_gem_object_lock(bb->obj, &ww);
> +
> +			bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
> +							      NULL, 0, 0, 0);
>  			if (IS_ERR(bb->vma)) {
>  				ret = PTR_ERR(bb->vma);
> +				if (ret == -EDEADLK) {
> +					ret = i915_gem_ww_ctx_backoff(&ww);
> +					if (!ret)
> +						goto retry;
> +				}
>  				goto err;
>  			}
>  
> @@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
>  						      0);
>  			if (ret)
>  				goto err;
> -		}
>  
> -		/* No one is going to touch shadow bb from now on. */
> -		i915_gem_object_flush_map(bb->obj);
> +			/* No one is going to touch shadow bb from now on. */
> +			i915_gem_object_flush_map(bb->obj);
> +			i915_gem_object_unlock(bb->obj);
> +		}
>  	}
>  	return 0;
>  err:
> +	i915_gem_ww_ctx_fini(&ww);
>  	release_shadow_batch_buffer(workload);
>  	return ret;
>  }
> @@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
>  	unsigned char *per_ctx_va =
>  		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
>  		wa_ctx->indirect_ctx.size;
> +	struct i915_gem_ww_ctx ww;
> +	int ret;
>  
>  	if (wa_ctx->indirect_ctx.size == 0)
>  		return 0;
>  
> -	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
> -				       0, CACHELINE_BYTES, 0);
> -	if (IS_ERR(vma))
> -		return PTR_ERR(vma);
> +	i915_gem_ww_ctx_init(&ww, false);
> +retry:
> +	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
> +
> +	vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
> +					  0, CACHELINE_BYTES, 0);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
> +		if (ret == -EDEADLK) {
> +			ret = i915_gem_ww_ctx_backoff(&ww);
> +			if (!ret)
> +				goto retry;
> +		}
> +		return ret;
> +	}
> +
> +	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
>  
>  	/* FIXME: we are not tracking our pinned VMA leaving it
>  	 * up to the core to fix up the stray pin_count upon
> @@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
>  
>  	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
>  		if (bb->obj) {
> +			i915_gem_object_lock(bb->obj, NULL);
>  			if (bb->va && !IS_ERR(bb->va))
>  				i915_gem_object_unpin_map(bb->obj);
>  
>  			if (bb->vma && !IS_ERR(bb->vma))
>  				i915_vma_unpin(bb->vma);
>  
> +			i915_gem_object_unlock(bb->obj);
>  			i915_gem_object_put(bb->obj);
>  		}
>  		list_del(&bb->list);
> -- 
> 2.7.4
> 
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 195 bytes
Desc: not available
URL: <https://lists.freedesktop.org/archives/intel-gvt-dev/attachments/20210118/7d8f0786/attachment.sig>


More information about the intel-gvt-dev mailing list