[PATCH 2/2] drm/i915/gvt: optimization for save-inhibit context

Yan Zhao yan.y.zhao at intel.com
Mon Jun 3 04:57:57 UTC 2019


Sometimes a Linux guest submits preempt contexts that are both
restore-inhibit and save-inhibit contexts. E.g. glmark2 submits
about 30 preempt contexts per second on average.

For this kind of context:
1. there is no need to load the mmios in the save-restore list back
   to hardware
2. there is no need to copy the context mmios back to the guest

Cc: Weinan Li <weinan.z.li at intel.com>
Signed-off-by: Yan Zhao <yan.y.zhao at intel.com>
---
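Note for reviewers, not part of the commit message: the new
IS_SAVE_INHIBIT() check relies on CTX_CONTEXT_CONTROL being a
masked-write register, where bits 31:16 form a write mask and bits
15:0 the values, so a bit only counts as set when it appears in both
halves. The standalone sketch below mimics _MASKED_BIT_ENABLE() and
the new macro to show why the comparison is written this way; the
bit position used is illustrative only, the kernel takes the real
one from CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT in the i915 headers.

#include <stdio.h>
#include <stdint.h>

/* Mimics i915's _MASKED_BIT_ENABLE(): the bit is placed in both the
 * write-mask half (bits 31:16) and the value half (bits 15:0).
 */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))

/* Illustrative bit position, assumed here for the demo only. */
#define CTX_SAVE_INHIBIT_BIT	(1 << 2)

/* Same shape as the IS_SAVE_INHIBIT() macro added by this patch:
 * true only when the bit is set in both the mask and value halves,
 * i.e. the guest explicitly enabled context save inhibit.
 */
#define IS_SAVE_INHIBIT(a) \
	(MASKED_BIT_ENABLE(CTX_SAVE_INHIBIT_BIT) == \
	((a) & MASKED_BIT_ENABLE(CTX_SAVE_INHIBIT_BIT)))

int main(void)
{
	uint32_t armed     = MASKED_BIT_ENABLE(CTX_SAVE_INHIBIT_BIT);
	uint32_t val_only  = CTX_SAVE_INHIBIT_BIT;        /* mask half clear */
	uint32_t mask_only = CTX_SAVE_INHIBIT_BIT << 16;  /* value half clear */

	printf("armed:     %d\n", IS_SAVE_INHIBIT(armed));     /* 1 */
	printf("val only:  %d\n", IS_SAVE_INHIBIT(val_only));  /* 0 */
	printf("mask only: %d\n", IS_SAVE_INHIBIT(mask_only)); /* 0 */
	return 0;
}

Built with any C compiler this prints 1/0/0: a bit present only in
the value half or only in the mask half does not satisfy the check.
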
 drivers/gpu/drm/i915/gvt/mmio_context.c |  7 ++++
 drivers/gpu/drm/i915/gvt/mmio_context.h |  5 +++
 drivers/gpu/drm/i915/gvt/scheduler.c    | 49 ++++++++++++++-----------
 3 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 0c3e2f21e28c..9b9630f4297b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -445,6 +445,13 @@ bool is_restore_inhibit_context(struct intel_context *ce)
 	return IS_RESTORE_INHIBIT(reg_state[CTX_CONTEXT_CONTROL_VAL]);
 }
 
+bool is_save_inhibit_context(struct intel_context *ce)
+{
+	const u32 *reg_state = ce->lrc_reg_state;
+
+	return IS_SAVE_INHIBIT(reg_state[CTX_CONTEXT_CONTROL_VAL]);
+}
+
 /* Switch ring mmio values (context). */
 static void switch_mmio(struct intel_vgpu *pre,
 			struct intel_vgpu *next,
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 08e3a775fae7..daffd2d3d023 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -50,6 +50,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
 
 bool is_restore_inhibit_context(struct intel_context *ce);
+bool is_save_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
@@ -57,4 +58,8 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
 	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
 
+#define IS_SAVE_INHIBIT(a)	\
+	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) == \
+	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)))
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5c2087600442..9fb69bf37bc1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -313,7 +313,9 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_restore_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) &&
+			is_restore_inhibit_context(req->hw_context) &&
+			!is_save_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -804,6 +806,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);
 
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap(page);
+	if (IS_SAVE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) {
+		kunmap(page);
+		return;
+	}
+
+#define COPY_REG(name) \
+	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+	COPY_REG(ctx_ctrl);
+	COPY_REG(ctx_timestamp);
+
+#undef COPY_REG
+
+	intel_gvt_hypervisor_write_gpa(vgpu,
+			workload->ring_context_gpa +
+			sizeof(*shadow_ring_context),
+			(void *)shadow_ring_context +
+			sizeof(*shadow_ring_context),
+			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+	kunmap(page);
+
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
@@ -832,26 +859,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
-
-#define COPY_REG(name) \
-	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
-		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
-
-	COPY_REG(ctx_ctrl);
-	COPY_REG(ctx_timestamp);
-
-#undef COPY_REG
-
-	intel_gvt_hypervisor_write_gpa(vgpu,
-			workload->ring_context_gpa +
-			sizeof(*shadow_ring_context),
-			(void *)shadow_ring_context +
-			sizeof(*shadow_ring_context),
-			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
-	kunmap(page);
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-- 
2.17.1