[PATCH v4] drm/i915/gvt: Support PPGTT table load command

Yan Zhao yan.y.zhao at intel.com
Fri May 8 03:25:20 UTC 2020


Reviewed-by: Yan Zhao <yan.y.zhao at intel.com>

On Fri, May 08, 2020 at 11:14:09AM +0800, Zhenyu Wang wrote:
> The PPGTT in the context image can be overridden by an LRI cmd with
> another PPGTT's pdps. In that case, the loaded mm is used instead of
> the one in the context image, so we need to load its shadow mm in GVT
> and replace the ppgtt pointers in the command.
> 
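(For reference, the guest-side sequence being handled here is the LRI
PPGTT reload that i915 itself emits, cf. emit_ppgtt_update() in
intel_lrc.c of this era. A rough sketch of the 4-level case, register
names from i915_reg.h:

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
	*cs++ = upper_32_bits(pd_daddr);	/* PML4 root, high 32 bits */
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
	*cs++ = lower_32_bits(pd_daddr);	/* PML4 root, low 32 bits */

so for a 4-level guest only the PDP0 UDW/LDW pair carries the new
root.)
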
> This feature is used by the guest IGD driver to share a gfx VM between
> different contexts. Verified by the IGT "gem_ctx_clone" test.
> 
> v4:
> - consolidate shadow mm handlers (Yan)
> - fix cmd shadow mm pin error path
> 
> v3: (Zhenyu Wang)
> - Cleanup PDP register offset check
> - Add debug check for guest context ppgtt update
> - Skip 3-level ppgtt guest handling code. The reason is that all
>   guests now use a 4-level ppgtt table and the only remaining case for
>   a 3-level table is the ancient aliasing ppgtt one. But those guest
>   kernels make no use of the PPGTT LRI command, so a 3-level ppgtt
>   guest for this feature is simply un-testable.
> 
> v2: (Zhenyu Wang)
> - Change to a list to handle possibly multiple ppgtt table loads
>   in one submission. Make sure a shadow mm replacement is done for
>   each one.
> 
> Cc: Yan Zhao <yan.y.zhao at intel.com>
> Signed-off-by: Tina Zhang <tina.zhang at intel.com>
> Signed-off-by: Zhenyu Wang <zhenyuw at linux.intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/cmd_parser.c |  45 +++++++++++
>  drivers/gpu/drm/i915/gvt/gtt.c        |   1 +
>  drivers/gpu/drm/i915/gvt/gtt.h        |   1 +
>  drivers/gpu/drm/i915/gvt/handlers.c   |   2 +-
>  drivers/gpu/drm/i915/gvt/scheduler.c  | 106 +++++++++++++++++++++++++-
>  drivers/gpu/drm/i915/gvt/scheduler.h  |   1 +
>  6 files changed, 151 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> index a3cc080a46c6..8b87f130f7f1 100644
> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> @@ -882,6 +882,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
>  	return 0;
>  }
>  
> +static int is_cmd_update_pdps(unsigned int offset,
> +			      struct parser_exec_state *s)
> +{
> +	u32 base = s->workload->engine->mmio_base;
> +	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
> +}
> +
> +static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
> +				       unsigned int offset, unsigned int index)
> +{
> +	struct intel_vgpu *vgpu = s->vgpu;
> +	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
> +	struct intel_vgpu_mm *mm;
> +	u64 pdps[GEN8_3LVL_PDPES];
> +
> +	if (shadow_mm->ppgtt_mm.root_entry_type ==
> +	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
> +		pdps[0] = (u64)cmd_val(s, 2) << 32;
> +		pdps[0] |= cmd_val(s, 4);
> +
> +		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
> +		if (!mm) {
> +			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
> +			return -EINVAL;
> +		}
> +		intel_vgpu_mm_get(mm);
> +		list_add_tail(&mm->ppgtt_mm.link,
> +			      &s->workload->lri_shadow_mm);
> +		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
> +		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
> +	} else {
> +		/* Currently all guests use PML4 table and now can't
> +		 * have a guest with 3-level table but uses LRI for
> +		 * PPGTT update. So this is simply un-testable. */
> +		GEM_BUG_ON(1);
> +		gvt_vgpu_err("invalid shared shadow vm type\n");
> +		return -EINVAL;
> +	}
> +	return 0;
> +}
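
The handler relies on the fixed dword layout of that LRI, which is why
it reads and patches indices 2 and 4 directly:

	/* dword 0: MI_LOAD_REGISTER_IMM(2)
	 * dword 1: GEN8_RING_PDP_UDW(base, 0) offset
	 * dword 2: upper 32 bits of the root  <- cmd_val(s, 2)
	 * dword 3: GEN8_RING_PDP_LDW(base, 0) offset
	 * dword 4: lower 32 bits of the root  <- cmd_val(s, 4)
	 */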
> +
>  static int cmd_reg_handler(struct parser_exec_state *s,
>  	unsigned int offset, unsigned int index, char *cmd)
>  {
> @@ -920,6 +961,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
>  		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
>  	}
>  
> +	if (is_cmd_update_pdps(offset, s) &&
> +	    cmd_pdp_mmio_update_handler(s, offset, index))
> +		return -EINVAL;
> +
>  	/* TODO
>  	 * In order to let workload with inhibit context to generate
>  	 * correct image data into memory, vregs values will be loaded to
> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> index d2b0d85b39bc..210016192ce7 100644
> --- a/drivers/gpu/drm/i915/gvt/gtt.c
> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> @@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
>  
>  	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
>  	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
> +	INIT_LIST_HEAD(&mm->ppgtt_mm.link);
>  
>  	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
>  		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
> diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
> index 88789316807d..320b8d6ad92f 100644
> --- a/drivers/gpu/drm/i915/gvt/gtt.h
> +++ b/drivers/gpu/drm/i915/gvt/gtt.h
> @@ -160,6 +160,7 @@ struct intel_vgpu_mm {
>  
>  			struct list_head list;
>  			struct list_head lru_list;
> +			struct list_head link; /* possible LRI shadow mm list */
>  		} ppgtt_mm;
>  		struct {
>  			void *virtual_ggtt;
> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> index 2faf50e1b051..3e88e3b5c43a 100644
> --- a/drivers/gpu/drm/i915/gvt/handlers.c
> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> @@ -2812,7 +2812,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
>  	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
>  
>  #define RING_REG(base) _MMIO((base) + 0x270)
> -	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
> +	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
>  #undef RING_REG
>  
>  	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 6e59c7d61397..cc6bfeda0325 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -646,10 +646,11 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
>  	}
>  }
>  
> -static int prepare_workload(struct intel_vgpu_workload *workload)
> +static int
> +intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
>  {
>  	struct intel_vgpu *vgpu = workload->vgpu;
> -	struct intel_vgpu_submission *s = &vgpu->submission;
> +	struct intel_vgpu_mm *m;
>  	int ret = 0;
>  
>  	ret = intel_vgpu_pin_mm(workload->shadow_mm);
> @@ -664,6 +665,52 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
>  		return -EINVAL;
>  	}
>  
> +	if (!list_empty(&workload->lri_shadow_mm)) {
> +		list_for_each_entry(m, &workload->lri_shadow_mm,
> +				    ppgtt_mm.link) {
> +			ret = intel_vgpu_pin_mm(m);
> +			if (ret) {
> +				list_for_each_entry_from_reverse(m,
> +								 &workload->lri_shadow_mm,
> +								 ppgtt_mm.link)
> +					intel_vgpu_unpin_mm(m);
> +				gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
> +				break;
> +			}
> +		}
> +	}
> +
> +	if (ret)
> +		intel_vgpu_unpin_mm(workload->shadow_mm);
> +
> +	return ret;
> +}
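
The unwind looks right to me: list_for_each_entry_from_reverse() walks
back from the entry whose pin failed, inclusive, which balances
intel_vgpu_pin_mm() taking its pincount before it can fail (if I read
the macro and the pin path right), and the base shadow_mm pin is
dropped below.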
> +
> +static void
> +intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
> +{
> +	struct intel_vgpu_mm *m;
> +
> +	if (!list_empty(&workload->lri_shadow_mm)) {
> +		list_for_each_entry(m, &workload->lri_shadow_mm,
> +				    ppgtt_mm.link)
> +			intel_vgpu_unpin_mm(m);
> +	}
> +	intel_vgpu_unpin_mm(workload->shadow_mm);
> +}
> +
> +static int prepare_workload(struct intel_vgpu_workload *workload)
> +{
> +	struct intel_vgpu *vgpu = workload->vgpu;
> +	struct intel_vgpu_submission *s = &vgpu->submission;
> +	int ret = 0;
> +
> +	ret = intel_vgpu_shadow_mm_pin(workload);
> +	if (ret) {
> +		gvt_vgpu_err("fail to pin shadow mm\n");
> +		return ret;
> +	}
> +
>  	update_shadow_pdps(workload);
>  
>  	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
> @@ -710,7 +757,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
>  err_shadow_batch:
>  	release_shadow_batch_buffer(workload);
>  err_unpin_mm:
> -	intel_vgpu_unpin_mm(workload->shadow_mm);
> +	intel_vgpu_shadow_mm_unpin(workload);
>  	return ret;
>  }
>  
> @@ -820,6 +867,37 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
>  	return workload;
>  }
>  
> +static void update_guest_pdps(struct intel_vgpu *vgpu,
> +			      u64 ring_context_gpa, u32 pdp[8])
> +{
> +	u64 gpa;
> +	int i;
> +
> +	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
> +
> +	for (i = 0; i < 8; i++)
> +		intel_gvt_hypervisor_write_gpa(vgpu,
> +				gpa + i * 8, &pdp[7 - i], 4);
> +}
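
The pdp[7 - i] reversal matches the register order in the context
image, if I have the layout right:

	/* image pdps[0..7].val: PDP3_UDW, PDP3_LDW, ..., PDP0_UDW, PDP0_LDW
	 * guest_pdps seen as u32[8] (little-endian u64[4]):
	 *                       PDP0_LDW, PDP0_UDW, ..., PDP3_LDW, PDP3_UDW
	 * so image pdps[i].val = pdp[7 - i], one mmio pair (8 bytes) per step
	 */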
> +
> +static bool
> +check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
> +{
> +	if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
> +		u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
> +
> +		if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
> +			gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
> +			return false;
> +		}
> +		return true;
> +	} else {
> +		/* see comment in LRI handler in cmd_parser.c */
> +		gvt_dbg_mm("invalid shadow mm type\n");
> +		return false;
> +	}
> +}
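
(Consistent with the ordering above: pdps[7].val / pdps[6].val are PDP0
LDW / UDW, i.e. the shadow PML4 root being compared.)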
> +
>  static void update_guest_context(struct intel_vgpu_workload *workload)
>  {
>  	struct i915_request *rq = workload->req;
> @@ -905,6 +983,15 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
>  
>  	shadow_ring_context = (void *) ctx->lrc_reg_state;
>  
> +	if (!list_empty(&workload->lri_shadow_mm)) {
> +		struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
> +							  struct intel_vgpu_mm,
> +							  ppgtt_mm.link);
> +		GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
> +		update_guest_pdps(vgpu, workload->ring_context_gpa,
> +				  (void *)m->ppgtt_mm.guest_pdps);
> +	}
> +
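
Using list_last_entry() makes sense: the last LRI load in the
submission is the ppgtt live at context save time, so its guest pdps
are what must be written back to the guest context image.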
>  #define COPY_REG(name) \
>  	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
>  		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
> @@ -1013,7 +1100,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>  
>  	workload->complete(workload);
>  
> -	intel_vgpu_unpin_mm(workload->shadow_mm);
> +	intel_vgpu_shadow_mm_unpin(workload);
>  	intel_vgpu_destroy_workload(workload);
>  
>  	atomic_dec(&s->running_workload_num);
> @@ -1409,6 +1496,16 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
>  	release_shadow_batch_buffer(workload);
>  	release_shadow_wa_ctx(&workload->wa_ctx);
>  
> +	if (!list_empty(&workload->lri_shadow_mm)) {
> +		struct intel_vgpu_mm *m, *mm;
> +		list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
> +					 ppgtt_mm.link) {
> +			list_del(&m->ppgtt_mm.link);
> +			intel_vgpu_mm_put(m);
> +		}
> +	}
> +
> +	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
>  	if (workload->shadow_mm)
>  		intel_vgpu_mm_put(workload->shadow_mm);
>  
> @@ -1427,6 +1524,7 @@ alloc_workload(struct intel_vgpu *vgpu)
>  
>  	INIT_LIST_HEAD(&workload->list);
>  	INIT_LIST_HEAD(&workload->shadow_bb);
> +	INIT_LIST_HEAD(&workload->lri_shadow_mm);
>  
>  	init_waitqueue_head(&workload->shadow_ctx_status_wq);
>  	atomic_set(&workload->shadow_ctx_active, 0);
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
> index bf7fc0ca4cb1..15d317f2a4a4 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.h
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.h
> @@ -87,6 +87,7 @@ struct intel_vgpu_workload {
>  	int status;
>  
>  	struct intel_vgpu_mm *shadow_mm;
> +	struct list_head lri_shadow_mm; /* For PPGTT load cmd */
>  
>  	/* different submission model may need different handler */
>  	int (*prepare)(struct intel_vgpu_workload *);
> -- 
> 2.26.2
> 

