[PATCH 1/6] drm/i915/gvt: Separate cmd scan from request allocation

Gao, Fred fred.gao at intel.com
Wed Aug 9 06:35:30 UTC 2017


Thanks, Zhenyu, for the thoughtful review.

For the ctx pin issue:

    ring = engine->context_pin(engine, shadow_ctx);

The pin above is needed by the shadow ctx access below; otherwise a NULL
pointer dereference is triggered in:

>       ret = populate_shadow_context(workload);

Previously this was fine because the pin was also done inside
i915_gem_request_alloc(), which ran before the scan; now
i915_gem_request_alloc() is moved after the scan, so gvt has to pin the
shadow ctx itself first.
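
For reference, the resulting ordering in intel_gvt_scan_and_shadow_workload()
looks roughly like this (taken from this patch, error handling elided):

    /* scan/copy into the reserved buffer; no i915 request needed yet */
    ret = intel_gvt_scan_and_shadow_ringbuffer(workload);

    /* pin the shadow ctx before populate_shadow_context() touches its pages */
    ring = engine->context_pin(engine, shadow_ctx);

    ret = populate_shadow_context(workload);

    /* only after a successful scan: allocate the request and the real
     * shadow ring buffer, then copy the scanned commands into it */
    rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
    workload->req = i915_gem_request_get(rq);

    copy_workload_to_ring_buffer(workload);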
________________________________________
From: Zhenyu Wang [zhenyuw at linux.intel.com]
Sent: Wednesday, August 09, 2017 11:00 AM
To: Gao, Fred
Cc: intel-gvt-dev at lists.freedesktop.org
Subject: Re: [PATCH 1/6] drm/i915/gvt: Separate cmd scan from request allocation

On 2017.08.08 23:31:00 +0800, fred gao wrote:
> Currently the i915 request structure and shadow ring buffer are allocated
> before the command scan, so once any error happens afterwards in the long
> dispatch_workload path, everything has to be restored to the previous state.
>
> This patch introduces a reserved ring buffer created at the beginning
> of vGPU initialization. The workload is copied to this reserved buffer and
> scanned first; the i915 request and shadow ring buffer are only
> allocated after the scan succeeds.
>
> To balance memory usage against buffer allocation time, when a bigger ring
> buffer comes it is reallocated and kept until an even bigger one arrives.
>
> v2:
> - use kmalloc for the smaller ring buffer, realloc if required. (Zhenyu)
>
> v3:
> - remove the dynamically allocated ring buffer. (Zhenyu)
>
> Signed-off-by: fred gao <fred.gao at intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/cmd_parser.c | 34 +++++++++-----
>  drivers/gpu/drm/i915/gvt/execlist.c   | 21 +++++++++
>  drivers/gpu/drm/i915/gvt/execlist.h   |  1 +
>  drivers/gpu/drm/i915/gvt/gvt.h        |  3 ++
>  drivers/gpu/drm/i915/gvt/scheduler.c  | 88 +++++++++++++++++++++++------------
>  5 files changed, 107 insertions(+), 40 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> index 4ddae9b..448ab02 100644
> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> @@ -2529,6 +2529,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
>       s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
>       s.ring_head = gma_head;
>       s.ring_tail = gma_tail;
> +
>       s.rb_va = workload->shadow_ring_buffer_va;
>       s.workload = workload;

No need for the extra blank line here.

>
> @@ -2593,7 +2594,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
>  {
>       struct intel_vgpu *vgpu = workload->vgpu;
>       unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
> -     u32 *cs;
> +     void *shadow_ring_buffer_va;
> +     int ring_id = workload->ring_id;
>       int ret;
>
>       guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
> @@ -2606,34 +2608,43 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
>       gma_tail = workload->rb_start + workload->rb_tail;
>       gma_top = workload->rb_start + guest_rb_size;
>
> -     /* allocate shadow ring buffer */
> -     cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
> -     if (IS_ERR(cs))
> -             return PTR_ERR(cs);
> +     if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
> +             void *va = vgpu->reserve_ring_buffer_va[ring_id];
> +             /* realloc the new ring buffer if needed */
> +             vgpu->reserve_ring_buffer_va[ring_id] =
> +                     krealloc(va, workload->rb_len, GFP_KERNEL);
> +             if (!vgpu->reserve_ring_buffer_va[ring_id]) {
> +                     gvt_vgpu_err("fail to alloc reserve ring buffer\n");
> +                     return -ENOMEM;
> +             }
> +             vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
> +     }
> +
> +     shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
>
>       /* get shadow ring buffer va */
> -     workload->shadow_ring_buffer_va = cs;
> +     workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
>
>       /* head > tail --> copy head <-> top */
>       if (gma_head > gma_tail) {
>               ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
> -                                   gma_head, gma_top, cs);
> +                                   gma_head, gma_top, shadow_ring_buffer_va);
>               if (ret < 0) {
>                       gvt_vgpu_err("fail to copy guest ring buffer\n");
>                       return ret;
>               }
> -             cs += ret / sizeof(u32);
> +             shadow_ring_buffer_va += ret;
>               gma_head = workload->rb_start;
>       }
>
>       /* copy head or start <-> tail */
> -     ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
> +     ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
> +                             shadow_ring_buffer_va);
>       if (ret < 0) {
>               gvt_vgpu_err("fail to copy guest ring buffer\n");
>               return ret;
>       }
> -     cs += ret / sizeof(u32);
> -     intel_ring_advance(workload->req, cs);
> +
>       return 0;
>  }
>
> @@ -2653,6 +2664,7 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
>               gvt_vgpu_err("scan workload error\n");
>               return ret;
>       }
> +
>       return 0;
>  }

Don't include this; it's not related to this patch.

>
> diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
> index 28a2c7e..4369686 100644
> --- a/drivers/gpu/drm/i915/gvt/execlist.c
> +++ b/drivers/gpu/drm/i915/gvt/execlist.c
> @@ -799,8 +799,18 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
>
>  void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
>  {
> +     enum intel_engine_id i;
> +     struct intel_engine_cs *engine;
> +
>       clean_workloads(vgpu, ALL_ENGINES);
>       kmem_cache_destroy(vgpu->workloads);
> +
> +     for_each_engine(engine, vgpu->gvt->dev_priv, i) {
> +             kfree(vgpu->reserve_ring_buffer_va[i]);
> +             vgpu->reserve_ring_buffer_va[i] = NULL;
> +             vgpu->reserve_ring_buffer_size[i] = 0;
> +     }
> +
>  }
>
>  int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
> @@ -822,6 +832,17 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
>       if (!vgpu->workloads)
>               return -ENOMEM;
>
> +     /* each ring has a shadow ring buffer until vgpu destroyed */
> +     for_each_engine(engine, vgpu->gvt->dev_priv, i) {
> +             vgpu->reserve_ring_buffer_va[i] =
> +                     kmalloc(RESERVE_RING_BUFFER_SIZE/8, GFP_KERNEL);

just "#define RESERVE_RING_BUFFER_SIZE 512" and use it.


> +             if (!vgpu->reserve_ring_buffer_va[i]) {
> +                     gvt_vgpu_err("fail to alloc reserve ring buffer\n");
> +                     return -ENOMEM;

You need to kfree the previously allocated buffers when this happens.
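
e.g., jump to an unwind loop on failure (a sketch; it assumes the
reserve_ring_buffer_va array starts out zeroed, so kfree() on the
not-yet-allocated slots is a harmless kfree(NULL)):

        if (!vgpu->reserve_ring_buffer_va[i]) {
                gvt_vgpu_err("fail to alloc reserve ring buffer\n");
                goto out;
        }
        ...
out:
        for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                kfree(vgpu->reserve_ring_buffer_va[i]);
                vgpu->reserve_ring_buffer_va[i] = NULL;
                vgpu->reserve_ring_buffer_size[i] = 0;
        }
        return -ENOMEM;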

> +             }
> +             vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE/8;
> +     }
> +
>       return 0;
>  }
>
> diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
> index 7eced40..c9307e0 100644
> --- a/drivers/gpu/drm/i915/gvt/execlist.h
> +++ b/drivers/gpu/drm/i915/gvt/execlist.h
> @@ -35,6 +35,7 @@
>  #ifndef _GVT_EXECLIST_H_
>  #define _GVT_EXECLIST_H_
>
> +#define RESERVE_RING_BUFFER_SIZE             (1 * PAGE_SIZE)

ditto

>  struct execlist_ctx_descriptor_format {
>       union {
>               u32 udw;
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 0f85fff..d2aa7f9 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -167,6 +167,9 @@ struct intel_vgpu {
>       atomic_t running_workload_num;
>       DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
>       struct i915_gem_context *shadow_ctx;
> +     /* 4 pages for each reserve_ring_buffer_va */
> +     void *reserve_ring_buffer_va[I915_NUM_ENGINES];
> +     int reserve_ring_buffer_size[I915_NUM_ENGINES];
>
>  #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
>       struct {
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index bd59c6d..88ef48a 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -184,6 +184,37 @@ static int shadow_context_status_change(struct notifier_block *nb,
>       return NOTIFY_OK;
>  }
>
> +static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
> +{
> +     struct intel_vgpu *vgpu = workload->vgpu;
> +     int ring_id = workload->ring_id;
> +     struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
> +     void *shadow_ring_buffer_va;
> +     u32 *cs;
> +
> +     /* allocate shadow ring buffer */
> +     cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
> +     if (IS_ERR(cs)) {
> +             gvt_vgpu_err("fail to alloc size =%ld shadow  ring buffer\n",
> +                     workload->rb_len);
> +             return PTR_ERR(cs);
> +     }
> +
> +     shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
> +
> +     /* get shadow ring buffer va */
> +     workload->shadow_ring_buffer_va = cs;
> +
> +     memcpy(workload->shadow_ring_buffer_va, shadow_ring_buffer_va,
> +                     workload->rb_len);

Turn these lines into just:

        memcpy(cs, workload->shadow_ring_buffer_va, workload->rb_len);
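
With that change the function body reads roughly like this (a sketch with
the suggestion applied):

        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        /* copy the scanned commands out of the reserved buffer, then
         * point the workload at the real shadow ring buffer */
        memcpy(cs, workload->shadow_ring_buffer_va, workload->rb_len);
        workload->shadow_ring_buffer_va = cs;

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;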

> +
> +     cs += workload->rb_len / sizeof(u32);
> +     intel_ring_advance(workload->req, cs);
> +
> +     return 0;
> +}
> +
> +
>  /**
>   * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
>   * shadow it as well, include ringbuffer,wa_ctx and ctx.
> @@ -197,8 +228,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>       int ring_id = workload->ring_id;
>       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
> +     struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>       struct drm_i915_gem_request *rq;
>       struct intel_vgpu *vgpu = workload->vgpu;
> +     struct intel_ring *ring;
>       int ret;
>
>       if (workload->shadowed)
> @@ -208,17 +241,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>       shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
>                                   GEN8_CTX_ADDRESSING_MODE_SHIFT;
>
> -     rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
> -     if (IS_ERR(rq)) {
> -             gvt_vgpu_err("fail to allocate gem request\n");
> -             ret = PTR_ERR(rq);
> -             goto out;
> -     }
> -
> -     gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
> -
> -     workload->req = i915_gem_request_get(rq);
> -
>       ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
>       if (ret)
>               goto out;
> @@ -230,10 +252,36 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>                       goto out;
>       }
>
> +     /* pin shadow context by gvt even the shadow context will be pinned
> +      * when i915 alloc request. That is because gvt will update the guest
> +      * context from shadow context when workload is completed, and at that
> +      * moment, i915 may already unpined the shadow context to make the
> +      * shadow_ctx pages invalid. So gvt need to pin itself. After update
> +      * the guest context, gvt can unpin the shadow_ctx safely.
> +      */
> +     ring = engine->context_pin(engine, shadow_ctx);
> +     if (IS_ERR(ring)) {
> +             ret = PTR_ERR(ring);
> +             gvt_vgpu_err("fail to pin shadow context\n");
> +             goto out;
> +     }
> +

Why move this extra pin here? It is there to take an extra reference for
gvt when completing the workload for the guest ctx update; no need to
change that.

>       ret = populate_shadow_context(workload);
>       if (ret)
>               goto out;
>
> +     rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
> +     if (IS_ERR(rq)) {
> +             gvt_vgpu_err("fail to allocate gem request\n");
> +             ret = PTR_ERR(rq);
> +             goto out;
> +     }
> +
> +     gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
> +
> +     workload->req = i915_gem_request_get(rq);

Add a blank line here.

> +     copy_workload_to_ring_buffer(workload);
> +
>       workload->shadowed = true;
>
>  out:
> @@ -243,11 +291,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>  static int dispatch_workload(struct intel_vgpu_workload *workload)
>  {
>       int ring_id = workload->ring_id;
> -     struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
>       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
> -     struct intel_engine_cs *engine = dev_priv->engine[ring_id];
> -     struct intel_vgpu *vgpu = workload->vgpu;
> -     struct intel_ring *ring;
>       int ret = 0;
>
>       gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
> @@ -265,20 +309,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>                       goto out;
>       }
>
> -     /* pin shadow context by gvt even the shadow context will be pinned
> -      * when i915 alloc request. That is because gvt will update the guest
> -      * context from shadow context when workload is completed, and at that
> -      * moment, i915 may already unpined the shadow context to make the
> -      * shadow_ctx pages invalid. So gvt need to pin itself. After update
> -      * the guest context, gvt can unpin the shadow_ctx safely.
> -      */
> -     ring = engine->context_pin(engine, shadow_ctx);
> -     if (IS_ERR(ring)) {
> -             ret = PTR_ERR(ring);
> -             gvt_vgpu_err("fail to pin shadow context\n");
> -             goto out;
> -     }
> -

ditto

>  out:
>       if (ret)
>               workload->status = ret;
> --
> 2.7.4
>
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev

--
Open Source Technology Center, Intel ltd.

$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827

