[PATCH v3 2/2] drm/i915/gvt: check ggtt entry modification status for guest ctxs

Yan Zhao yan.y.zhao at intel.com
Fri Mar 13 09:35:36 UTC 2020


On Fri, Mar 13, 2020 at 05:35:46PM +0800, Tian, Kevin wrote:
> > From: Yan Zhao
> > Sent: Friday, March 13, 2020 5:15 PM
> > 
> > For a guest context, if its ggtt entry has been modified since the last
> > context shadowing, it is treated as a different context from the last
> > shadowed one.
> > 
> > v3: no change
> > v2: rebased to 5.6.0-rc4+
> > 
> > Suggested-by: Zhenyu Wang <zhenyuw at linux.intel.com>
> > Signed-off-by: Yan Zhao <yan.y.zhao at intel.com>
> > ---
> >  drivers/gpu/drm/i915/gvt/gtt.c       | 17 +++++++++++++++++
> >  drivers/gpu/drm/i915/gvt/gvt.h       |  3 ++-
> >  drivers/gpu/drm/i915/gvt/scheduler.c | 14 ++++++++++++--
> >  3 files changed, 31 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> > index 2a4b23f8aa74..b1b6a51c006a 100644
> > --- a/drivers/gpu/drm/i915/gvt/gtt.c
> > +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> > @@ -2341,12 +2341,29 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
> >  {
> >  	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
> >  	int ret;
> > +	struct intel_vgpu_submission *s = &vgpu->submission;
> > +	struct intel_engine_cs *engine;
> > +	int i;
> > 
> >  	if (bytes != 4 && bytes != 8)
> >  		return -EINVAL;
> > 
> >  	off -= info->gtt_start_offset;
> >  	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
> > +
> > +	/* if the ggtt entry of the last submitted context is written,
> > +	 * that context has probably been unpinned.
> > +	 * Mark the last shadowed ctx as invalid.
> > +	 */
> > +	for_each_engine(engine, vgpu->gvt->gt, i) {
> > +		if (!s->last_ctx[i].valid)
> > +			continue;
> > +
> > +		if (atomic_read(&s->last_ctx[i].lrca) ==
> > +				off >> info->gtt_entry_size_shift) {
> > +			s->last_ctx[i].valid = false;
> > +		}
> 
> the context has multiple pages. Is it sufficient to compare
> just the first entry?
>
Yes, context pages are pinned/unpinned as a whole each time; it does not
make sense to pin/unpin only part of a context.
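
To make that concrete, below is a minimal sketch of the index comparison the
hunk above performs. The helper name is hypothetical and not part of the
patch; it only illustrates why watching the first entry is enough.

#include <linux/types.h>

/* Hypothetical helper, for illustration only: a GGTT write at byte
 * offset @off (already adjusted by gtt_start_offset) touches the entry
 * at index off >> entry_size_shift.  Because the guest pins/unpins all
 * pages of a context together, an unpin always rewrites the context's
 * first entry, whose index equals the recorded lrca, so comparing
 * against that single index is sufficient to notice the unpin.
 */
static bool ggtt_write_hits_last_ctx(u64 off, unsigned int entry_size_shift,
				     u32 last_lrca)
{
	return (off >> entry_size_shift) == last_lrca;
}

When such a check hits for an engine, the patch clears last_ctx[i].valid so
that the next submission repopulates the shadow context instead of skipping
the copy.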

> > +	}
> >  	return ret;
> >  }
> > 
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> > index e2d7ffd84457..b342d7be741f 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.h
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> > @@ -164,7 +164,8 @@ struct intel_vgpu_submission {
> >  	int virtual_submission_interface;
> >  	bool active;
> >  	struct {
> > -		u32 lrca;
> > +		atomic_t lrca;
> > +		bool valid;
> 
> This is not a good way to split the patches. The type should be finalized
> from the start.
>
OK, I will move it to patch 1.


> >  		u64 ring_context_gpa;
> >  	} last_ctx[I915_NUM_ENGINES];
> >  };
> > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> > index a66050a3d65a..7e0d4183b07b 100644
> > --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> > +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> > @@ -138,6 +138,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
> >  	int i;
> >  	bool skip = false;
> >  	int ring_id = workload->engine->id;
> > +	bool valid;
> > 
> >  	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
> >  	shadow_ring_context = kmap(page);
> > @@ -179,13 +180,20 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
> >  			workload->ctx_desc.context_id,
> >  			workload->ring_context_gpa);
> > 
> > -	if ((s->last_ctx[ring_id].lrca ==
> > +	/* we only need to ensure this context was not pinned/unpinned during
> > +	 * the period from the last submission to this submission.
> > +	 * Upon reaching this function, the currently submitted context is not
> > +	 * supposed to get unpinned. If a misbehaving guest driver ever does
> > +	 * this, it would corrupt itself.
> > +	 */
> > +	valid = s->last_ctx[ring_id].valid;
> > +	if (valid && (atomic_read(&s->last_ctx[ring_id].lrca) ==
> >  				workload->ctx_desc.lrca) &&
> >  			(s->last_ctx[ring_id].ring_context_gpa ==
> >  				workload->ring_context_gpa))
> >  		skip = true;
> > 
> > -	s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
> > +	atomic_set(&s->last_ctx[ring_id].lrca, workload->ctx_desc.lrca);
> >  	s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
> > 
> >  	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
> > @@ -214,6 +222,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
> >  		kunmap(page);
> >  		i++;
> >  	}
> > +	if (!valid)
> > +		s->last_ctx[ring_id].valid = true;
> >  	return 0;
> >  }
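
For clarity, here is a condensed, self-contained sketch of the skip decision
above. The struct name is hypothetical (it mirrors the anonymous last_ctx[]
entry in struct intel_vgpu_submission) and the helper is for illustration
only, not a drop-in for the patch.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical mirror of the anonymous last_ctx[] entry. */
struct last_ctx_state {
	atomic_t lrca;
	bool valid;
	u64 ring_context_gpa;
};

static bool same_context_since_last_shadow(const struct last_ctx_state *last,
					   u32 lrca, u64 ring_context_gpa)
{
	/* If any GGTT entry of the last context was rewritten since the
	 * previous submission, the guest probably unpinned it, so the
	 * shadow must be repopulated.
	 */
	if (!last->valid)
		return false;

	/* Otherwise it is the same context only if both the LRCA and the
	 * ring context GPA match.
	 */
	return (u32)atomic_read(&last->lrca) == lrca &&
	       last->ring_context_gpa == ring_context_gpa;
}

populate_shadow_context() can skip copying the guest ring context into the
shadow only when such a check passes; otherwise it copies again and then
marks the entry valid.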
> > 
> > --
> > 2.17.1
> > 