[PATCH 1/3] drm/i915/gvt: Separate cmd scan from request allocation
fred gao
fred.gao at intel.com
Wed Jul 19 06:46:28 UTC 2017
Currently the i915 request structure and shadow ring buffer are allocated
before the command scan, so they have to be restored to their previous
states once any error happens later in the long dispatch_workload path.

This patch introduces a reserved ring buffer created at the beginning of
vGPU initialization. The workload is copied into this reserved buffer and
scanned first; the i915 request and shadow ring buffer are only allocated
after the scan succeeds.

To balance memory usage against buffer allocation time, a dynamically
allocated ring buffer is also used for any guest ring buffer larger than
one page.
Signed-off-by: fred gao <fred.gao at intel.com>
---
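To make the reordering easier to follow, below is a small self-contained
userspace model of the new flow (plain C, standard library only). The buffer
names and the one-page threshold mirror the patch; the scanner stub and the
malloc() standing in for i915_gem_request_alloc()/intel_ring_begin() are
simplifications for illustration, not the kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define RESERVE_RING_BUFFER_SIZE (1 * PAGE_SIZE)

/* one pre-reserved buffer; the patch keeps one per engine per vGPU */
static char reserve_ring_buffer_va[RESERVE_RING_BUFFER_SIZE];

/* stand-in for the GVT command scanner; 0 means the scan passed */
static int scan_buffer(const void *va, size_t len)
{
	(void)va;
	(void)len;
	return 0;
}

static int shadow_workload(const void *guest_rb, size_t rb_len)
{
	void *scan_va;
	void *req_ring;
	int ret;

	/* small rings reuse the reserved buffer, big ones are allocated */
	if (rb_len > RESERVE_RING_BUFFER_SIZE) {
		scan_va = calloc(1, rb_len);
		if (!scan_va)
			return -1;
	} else {
		scan_va = reserve_ring_buffer_va;
	}

	/* copy_gma_to_hva() in the patch */
	memcpy(scan_va, guest_rb, rb_len);

	/* the command scan now happens before any request exists */
	ret = scan_buffer(scan_va, rb_len);
	if (ret)
		goto out;	/* nothing to roll back */

	/* only after a successful scan: space that in the patch would come
	 * from i915_gem_request_alloc() + intel_ring_begin() */
	req_ring = malloc(rb_len);
	if (!req_ring) {
		ret = -1;
		goto out;
	}
	/* copy_workload_to_ring_buffer() in the patch */
	memcpy(req_ring, scan_va, rb_len);
	free(req_ring);		/* model only; i915 owns it via the request */

out:
	/* a dynamic buffer is freed every time to save memory */
	if (rb_len > RESERVE_RING_BUFFER_SIZE)
		free(scan_va);
	return ret;
}

int main(void)
{
	static char guest[2 * PAGE_SIZE];

	printf("small ring: %d\n", shadow_workload(guest, PAGE_SIZE / 2));
	printf("large ring: %d\n", shadow_workload(guest, sizeof(guest)));
	return 0;
}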
drivers/gpu/drm/i915/gvt/cmd_parser.c | 38 ++++++++++----
drivers/gpu/drm/i915/gvt/execlist.c | 21 ++++++++
drivers/gpu/drm/i915/gvt/execlist.h | 1 +
drivers/gpu/drm/i915/gvt/gvt.h | 4 ++
drivers/gpu/drm/i915/gvt/scheduler.c | 95 ++++++++++++++++++++++++-----------
5 files changed, 119 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 4ddae9b..157a718 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2529,6 +2529,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
s.ring_tail = gma_tail;
+
s.rb_va = workload->shadow_ring_buffer_va;
s.workload = workload;
@@ -2593,7 +2594,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
- u32 *cs;
+ void *shadow_ring_buffer_va;
+ int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2606,34 +2608,47 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- /* allocate shadow ring buffer */
- cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ /* dynamically allocate once size > RESERVE_RING_BUFFER_SIZE */
+ if (workload->rb_len > RESERVE_RING_BUFFER_SIZE) {
+ vgpu->dynamic_ring_buffer_va[ring_id] =
+ (void *)__get_free_pages(GFP_KERNEL |
+ __GFP_ZERO, get_order(workload->rb_len));
+ gvt_vgpu_err("re to alloc size %ld dynamic ring buffer\n",
+ workload->rb_len);
+
+ if (!vgpu->dynamic_ring_buffer_va[ring_id]) {
+ gvt_vgpu_err("fail to alloc size %ld dynamic ring buffer\n",
+ workload->rb_len);
+ return -ENOMEM;
+ }
+ shadow_ring_buffer_va = vgpu->dynamic_ring_buffer_va[ring_id];
+ } else {
+ shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+ }
+
/* get shadow ring buffer va */
- workload->shadow_ring_buffer_va = cs;
+ workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_top, cs);
+ gma_head, gma_top, shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- cs += ret / sizeof(u32);
+ shadow_ring_buffer_va += ret;
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
- ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
+ shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- cs += ret / sizeof(u32);
- intel_ring_advance(workload->req, cs);
+
return 0;
}
@@ -2653,6 +2668,7 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
gvt_vgpu_err("scan workload error\n");
return ret;
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 28a2c7e..95f1607 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -799,8 +799,18 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ free_pages((unsigned long)vgpu->reserve_ring_buffer_va[i],
+ get_order(RESERVE_RING_BUFFER_SIZE));
+ vgpu->reserve_ring_buffer_va[i] = NULL;
+ }
+
}
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
@@ -822,6 +832,17 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
if (!vgpu->workloads)
return -ENOMEM;
+ /* each ring keeps a reserved shadow ring buffer until the vGPU is destroyed */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ vgpu->reserve_ring_buffer_va[i] =
+ (void *)__get_free_pages(GFP_KERNEL |
+ __GFP_ZERO, get_order(RESERVE_RING_BUFFER_SIZE));
+ if (!vgpu->reserve_ring_buffer_va[i]) {
+ gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ return -ENOMEM;
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 7eced40..c9307e0 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -35,6 +35,7 @@
#ifndef _GVT_EXECLIST_H_
#define _GVT_EXECLIST_H_
+#define RESERVE_RING_BUFFER_SIZE (1 * PAGE_SIZE)
struct execlist_ctx_descriptor_format {
union {
u32 udw;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0f85fff..4afe080 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -167,6 +167,10 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
+ /* RESERVE_RING_BUFFER_SIZE (one page) for each reserve_ring_buffer_va */
+ void *reserve_ring_buffer_va[I915_NUM_ENGINES];
+ /* allocated dynamically once the guest ring buffer exceeds one page */
+ void *dynamic_ring_buffer_va[I915_NUM_ENGINES];
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index bd59c6d..6d05ad4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -184,6 +184,44 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ void *shadow_ring_buffer_va;
+ u32 *cs;
+
+ /* allocate shadow ring buffer */
+ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+ if (IS_ERR(cs)) {
+ gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
+ workload->rb_len);
+ return PTR_ERR(cs);
+ }
+
+ shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = cs;
+
+ memcpy(workload->shadow_ring_buffer_va, shadow_ring_buffer_va,
+ workload->rb_len);
+
+ cs += workload->rb_len / sizeof(u32);
+ intel_ring_advance(workload->req, cs);
+
+ /* free the dynamic ring buffer every time to save memory */
+ if (workload->rb_len > RESERVE_RING_BUFFER_SIZE) {
+ free_pages((unsigned long)vgpu->dynamic_ring_buffer_va[ring_id],
+ get_order(workload->rb_len));
+ vgpu->dynamic_ring_buffer_va[ring_id] = NULL;
+ }
+
+ return 0;
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -197,8 +235,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
if (workload->shadowed)
@@ -208,17 +248,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto out;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_gem_request_get(rq);
-
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
goto out;
@@ -230,10 +259,36 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
goto out;
}
+ /* pin the shadow context by gvt even though the shadow context will be
+ * pinned when i915 allocates the request. That is because gvt will update
+ * the guest context from the shadow context when the workload is completed,
+ * and at that moment i915 may already have unpinned the shadow context,
+ * making the shadow_ctx pages invalid. So gvt needs to pin it itself. After
+ * updating the guest context, gvt can unpin the shadow_ctx safely.
+ */
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ gvt_vgpu_err("fail to pin shadow context\n");
+ goto out;
+ }
+
ret = populate_shadow_context(workload);
if (ret)
goto out;
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+ ret = copy_workload_to_ring_buffer(workload);
+ if (ret)
+ goto out;
+
workload->shadowed = true;
out:
@@ -243,11 +298,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_ring *ring;
int ret = 0;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -265,20 +316,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;
}
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = engine->context_pin(engine, shadow_ctx);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
- goto out;
- }
-
out:
if (ret)
workload->status = ret;
--
2.7.4