[PATCH] drm/i915/gvt: refine shadow batch buffer

Zhang, Tina tina.zhang at intel.com
Mon May 8 01:32:33 UTC 2017



> -----Original Message-----
> From: intel-gvt-dev [mailto:intel-gvt-dev-bounces at lists.freedesktop.org] On
> Behalf Of Zhang, Tina
> Sent: Tuesday, March 28, 2017 11:53 AM
> To: Zhenyu Wang <zhenyuw at linux.intel.com>
> Cc: intel-gvt-dev <intel-gvt-dev at lists.freedesktop.org>
> Subject: RE: [PATCH] drm/i915/gvt: refine shadow batch buffer
> 
> 
> 
> > -----Original Message-----
> > From: Zhenyu Wang [mailto:zhenyuw at linux.intel.com]
> > Sent: Tuesday, March 28, 2017 11:22 AM
> > To: Zhang, Tina <tina.zhang at intel.com>
> > Cc: intel-gvt-dev <intel-gvt-dev at lists.freedesktop.org>
> > Subject: Re: [PATCH] drm/i915/gvt: refine shadow batch buffer
> >
> > On 2017.03.28 00:30:13 +0000, Zhang, Tina wrote:
> > >
> > >
> > > > -----Original Message-----
> > > > From: intel-gvt-dev
> > > > [mailto:intel-gvt-dev-bounces at lists.freedesktop.org] On Behalf Of
> > > > Zhenyu Wang
> > > > Sent: Monday, March 20, 2017 11:30 AM
> > > > To: Zhang, Tina <tina.zhang at intel.com>
> > > > Cc: intel-gvt-dev at lists.freedesktop.org
> > > > Subject: Re: [PATCH] drm/i915/gvt: refine shadow batch buffer
> > > >
> > > > On 2017.03.16 21:25:56 -0400, Tina Zhang wrote:
> > > > > The i915 driver implements an i915 batch buffer pool based on
> > > > > internal GEM objects, which can be used for the shadow batch
> > > > > buffer. This patch refines the shadow batch buffer with the i915
> > > > > batch buffer pool APIs to implement an internal GEM object based
> > > > > shadow batch buffer.
> > > > >
> > > > > Signed-off-by: Tina Zhang <tina.zhang at intel.com>
> > > > >
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> > > > > index 6f972af..4e05215 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
> > > > > +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> > > > > @@ -1064,7 +1064,7 @@ static int cmd_advance_default(struct parser_exec_state *s)
> > > > >  	return ip_gma_advance(s, cmd_length(s));
> > > > >  }
> > > > >
> > > > > -static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
> > > > > +static int batch_buffer_end(struct parser_exec_state *s)
> > > > >  {
> > > > >  	int ret;
> > > > >
> > > > > @@ -1082,6 +1082,21 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
> > > > >  	return ret;
> > > > >  }
> > > > >
> > > > > +static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
> > > > > +{
> > > > > +	struct intel_shadow_bb_entry *entry_obj;
> > > > > +
> > > > > +	batch_buffer_end(s);
> > > > > +
> > > > > +	entry_obj = list_last_entry(&s->workload->mapped_shadow_bb,
> > > > > +				    struct intel_shadow_bb_entry, list);
> > > > > +
> > > > > +	i915_gem_object_unpin_map(entry_obj->obj);
> > > > > +	list_move_tail(&entry_obj->list, &s->workload->shadow_bb);
> > > > > +
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > >  struct mi_display_flip_command_info {
> > > > >  	int pipe;
> > > > >  	int plane;
> > > > > @@ -1628,6 +1643,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
> > > > >  {
> > > > >  	struct intel_shadow_bb_entry *entry_obj;
> > > > >  	struct intel_vgpu *vgpu = s->vgpu;
> > > > > +	unsigned int dst_needs_clflush;
> > > > >  	unsigned long gma = 0;
> > > > >  	uint32_t bb_size;
> > > > >  	void *dst = NULL;
> > > > > @@ -1644,41 +1660,46 @@ static int perform_bb_shadow(struct parser_exec_state *s)
> > > > >  	if (entry_obj == NULL)
> > > > >  		return -ENOMEM;
> > > > >
> > > > > -	entry_obj->obj =
> > > > > -		i915_gem_object_create(s->vgpu->gvt->dev_priv,
> > > > > -				       roundup(bb_size, PAGE_SIZE));
> > > > > +	entry_obj->obj = i915_gem_batch_pool_get(
> > > > > +		&s->workload->req->engine->batch_pool, PAGE_ALIGN(bb_size));
> > > > >  	if (IS_ERR(entry_obj->obj)) {
> > > > >  		ret = PTR_ERR(entry_obj->obj);
> > > > > -		goto free_entry;
> > > > > +		kfree(entry_obj);
> > > > > +		return ret;
> > > > >  	}
> > > > > +
> > > > > +	entry_obj->obj->active_count++;
> > > > >  	entry_obj->len = bb_size;
> > > > >  	INIT_LIST_HEAD(&entry_obj->list);
> > > > >
> > > > > +	ret = i915_gem_obj_prepare_shmem_write(entry_obj->obj,
> > > > > +			&dst_needs_clflush);
> > > > > +	if (ret) {
> > > > > +		kfree(entry_obj);
> > > > > +		return ret;
> > > > > +	}
> > > > >  	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
> > > > >  	if (IS_ERR(dst)) {
> > > > > +		i915_gem_obj_finish_shmem_access(entry_obj->obj);
> > > > > +		kfree(entry_obj);
> > > >
> > > > Should unpin allocated batch from pool.
> > > i915_gem_obj_finish_shmem_access() can do it.
> > > See:
> > > i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
> > > {
> > >         i915_gem_object_unpin_pages(obj);
> > > }
> > >
> >
> > oh, yeah, missed that. Have you done any testing for shadowed bb? btw
> > is it possible to have a test case on this instead of some hacking ways?
> Yes, I have done some tests with an Ubuntu guest. Since the shadowed bb is
> used by the shadow indirect context image, this logic is exercised almost all
> the time by the RCS ring. So any problem should be easy to find, if there is
> one.
I just found that the i915 GEM batch pool was updated by i915 commit 51a575d957fcb2848babb7163cb6e44a028a29ae. We need to rethink the shadow batch buffer refinement on top of that GEM batch pool work.
Currently, i915's GEM batch pool assumes that "obj->active_count is only updated upon retirement". Our refined shadow batch buffer implementation, however, does not rely on retirement to update obj->active_count; it does the update during workload->complete(). That is why, with the "refine shadow batch buffer" patch applied, the latest code hits a kernel BUG.
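To make the conflict concrete, here is a rough, self-contained sketch in plain C of the two update paths. These are simplified stand-in types and names (fake_pool_obj, pool_get, gvt_shadow_bb_get/put are illustrative only, not the real i915/gvt symbols); only the field name obj->active_count and the GVT call sites referenced in the comments come from the discussion and diff above.

/*
 * Rough sketch of the active_count conflict; simplified stand-in types,
 * not the real i915/gvt structures.
 */
#include <stddef.h>

struct fake_pool_obj {
	unsigned int active_count;	/* i915 assumption: only updated upon request retirement */
	size_t size;
	struct fake_pool_obj *next;
};

/* What the batch pool relies on: active_count == 0 means the object is idle. */
static struct fake_pool_obj *pool_get(struct fake_pool_obj *cache, size_t size)
{
	struct fake_pool_obj *obj;

	for (obj = cache; obj; obj = obj->next) {
		if (!obj->active_count && obj->size >= size)
			return obj;	/* believed idle, safe to reuse */
	}
	return NULL;			/* the real pool would allocate a new object here */
}

/*
 * What the GVT patch does instead (perform_bb_shadow() and
 * release_shadow_batch_buffer() in the diff above): bump and drop
 * active_count by hand, with the drop done from workload->complete()
 * rather than from request retirement.
 */
static void gvt_shadow_bb_get(struct fake_pool_obj *obj)
{
	obj->active_count++;
}

static void gvt_shadow_bb_put(struct fake_pool_obj *obj)
{
	--obj->active_count;
}

Once the core enforces the "updated upon retirement" assumption, the manual get/put above violates it, which matches the kernel BUG seen when this patch is applied on top of the latest code.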

> 
> >
> >
> > > >
> > > > >  		ret = PTR_ERR(dst);
> > > > > -		goto put_obj;
> > > > > -	}
> > > > > -
> > > > > -	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
> > > > > -	if (ret) {
> > > > > -		gvt_vgpu_err("failed to set shadow batch to CPU\n");
> > > > > -		goto unmap_src;
> > > > > +		return ret;
> > > > >  	}
> > > > >
> > > > > -	entry_obj->va = dst;
> > > > >  	entry_obj->bb_start_cmd_va = s->ip_va;
> > > > >
> > > > >  	/* copy batch buffer to shadow batch buffer*/
> > > > >  	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
> > > > > -			      gma, gma + bb_size,
> > > > > -			      dst);
> > > > > +			      gma, gma + bb_size, dst);
> > > > >  	if (ret < 0) {
> > > > > -		gvt_vgpu_err("fail to copy guest ring buffer\n");
> > > > > -		goto unmap_src;
> > > > > +		gvt_err("fail to copy guest batch buffer\n");
> > > > > +		i915_gem_object_unpin_map(entry_obj->obj);
> > > > > +		i915_gem_obj_finish_shmem_access(entry_obj->obj);
> > > > > +		kfree(entry_obj);
> > > > > +		return ret;
> > > > >  	}
> > > > >
> > > > > -	list_add(&entry_obj->list, &s->workload->shadow_bb);
> > > > > +	list_add_tail(&entry_obj->list, &s->workload->mapped_shadow_bb);
> > > > >  	/*
> > > > >  	 * ip_va saves the virtual address of the shadow batch buffer, while
> > > > >  	 * ip_gma saves the graphics address of the original batch buffer.
> > > > > @@ -1690,14 +1711,7 @@ static int perform_bb_shadow(struct
> > > > parser_exec_state *s)
> > > > >  	s->ip_va = dst;
> > > > >  	s->ip_gma = gma;
> > > > >
> > > > > -	return 0;
> > > > > -
> > > > > -unmap_src:
> > > > > -	i915_gem_object_unpin_map(entry_obj->obj);
> > > > > -put_obj:
> > > > > -	i915_gem_object_put(entry_obj->obj);
> > > > > -free_entry:
> > > > > -	kfree(entry_obj);
> > > > > +	i915_gem_obj_finish_shmem_access(entry_obj->obj);
> > > > >  	return ret;
> > > > >  }
> > > > >
> > > > > @@ -1735,7 +1749,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
> > > > >  			gvt_vgpu_err("invalid shadow batch buffer\n");
> > > > >  	} else {
> > > > >  		/* emulate a batch buffer end to do return right */
> > > > > -		ret = cmd_handler_mi_batch_buffer_end(s);
> > > > > +		ret = batch_buffer_end(s);
> > > > >  		if (ret < 0)
> > > > >  			return ret;
> > > > >  	}
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
> > > > > index f1f426a..6c19645 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/execlist.c
> > > > > +++ b/drivers/gpu/drm/i915/gvt/execlist.c
> > > > > @@ -374,12 +374,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
> > > > >  	/* pin the gem object to ggtt */
> > > > >  	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
> > > > >  		struct i915_vma *vma;
> > > > > -
> > > > > -		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
> > > > > +		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 0, 0);
> > > > >  		if (IS_ERR(vma)) {
> > > > >  			return;
> > > > >  		}
> > > > > -
> > > > > +		entry_obj->batch = vma;
> > > > >  		/* FIXME: we are not tracking our pinned VMA leaving it
> > > > >  		 * up to the core to fix up the stray pin_count upon
> > > > >  		 * free.
> > > > > @@ -389,6 +388,9 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
> > > > >  		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
> > > > >  		if (gmadr_bytes == 8)
> > > > >  			entry_obj->bb_start_cmd_va[2] = 0;
> > > > > +
> > > > > +		i915_gem_object_get(entry_obj->obj);
> > > > > +		i915_gem_object_unpin_pages(entry_obj->obj);
> > > > >  	}
> > > > >  }
> > > > >
> > > > > @@ -477,7 +479,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
> > > > >
> > > > >  		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
> > > > >  					 list) {
> > > > > -			i915_gem_object_unpin_map(entry_obj->obj);
> > > > > +			i915_vma_unpin(entry_obj->batch);
> > > > > +			--entry_obj->obj->active_count;
> > > > >  			i915_gem_object_put(entry_obj->obj);
> > > > >  			list_del(&entry_obj->list);
> > > > >  			kfree(entry_obj);
> > > > > @@ -650,6 +653,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
> > > > >
> > > > >  	INIT_LIST_HEAD(&workload->list);
> > > > >  	INIT_LIST_HEAD(&workload->shadow_bb);
> > > > > +	INIT_LIST_HEAD(&workload->mapped_shadow_bb);
> > > > >
> > > > >  	init_waitqueue_head(&workload->shadow_ctx_status_wq);
> > > > >  	atomic_set(&workload->shadow_ctx_active, 0);
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
> > > > > index 2833dfa..505a2c6 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/scheduler.h
> > > > > +++ b/drivers/gpu/drm/i915/gvt/scheduler.h
> > > > > @@ -104,6 +104,7 @@ struct intel_vgpu_workload {
> > > > >
> > > > >  	/* shadow batch buffer */
> > > > >  	struct list_head shadow_bb;
> > > > > +	struct list_head mapped_shadow_bb;
> > > > >  	struct intel_shadow_wa_ctx wa_ctx;
> > > > >  };
> > > > >
> > > > > @@ -111,7 +112,7 @@ struct intel_vgpu_workload {
> > > > >  struct intel_shadow_bb_entry {
> > > > >  	struct list_head list;
> > > > >  	struct drm_i915_gem_object *obj;
> > > > > -	void *va;
> > > > > +	struct i915_vma *batch;
> > > > >  	unsigned long len;
> > > > >  	u32 *bb_start_cmd_va;
> > > > >  };
> > > > > --
> > > > > 2.7.4
> > > > >
> > > > > _______________________________________________
> > > > > intel-gvt-dev mailing list
> > > > > intel-gvt-dev at lists.freedesktop.org
> > > > > https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
> > > >
> > > > --
> > > > Open Source Technology Center, Intel ltd.
> > > >
> > > > $gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827
> > > _______________________________________________
> > > intel-gvt-dev mailing list
> > > intel-gvt-dev at lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
> >
> > --
> > Open Source Technology Center, Intel ltd.
> >
> > $gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev

