[PATCH v2 2/2] drm/i915/gvt: only reset execlist state of one engine during VM engine reset
Weinan Li
weinan.z.li at intel.com
Thu Jan 25 03:27:59 UTC 2018
Only reset the vgpu execlist state of the exact engine which receives
a reset request from the VM. Once reading the context status from HWSP
is enabled, KMD uses the saved CSB read pointer instead of always
reading it from MMIO. When a single engine reset happens, only the
read pointer of that engine is reset, so the GVT-g host side needs to
follow the same policy; otherwise the VM may see a wrong CSB status
after the engine reset completes.
v2: Split into separate refine and fix patches, code refinement (Zhenyu)
Cc: Fred Gao <fred.gao at intel.com>
Cc: Zhi Wang <zhi.a.wang at intel.com>
Cc: Zhenyu Wang <zhenyuw at linux.intel.com>
Signed-off-by: Weinan Li <weinan.z.li at intel.com>
---
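For reference, a minimal sketch of the flow this patch establishes on
a VM-requested engine reset. The vgpu_engine_reset() wrapper below is
hypothetical and only illustrates the call pattern; the real changes
are the handlers.c and scheduler.c hunks that follow.

/*
 * Hypothetical helper, for illustration only: scope the execlist
 * state reset to the single engine the VM asked to reset instead
 * of tearing down ALL_ENGINES.
 */
static void vgpu_engine_reset(struct intel_vgpu *vgpu, int ring_id)
{
	int ret;

	/* Re-select submission ops for this engine only. */
	ret = intel_vgpu_select_submission_ops(vgpu,
			ENGINE_MASK(ring_id),
			INTEL_VGPU_EXECLIST_SUBMISSION);
	if (ret)
		return;

	/* Restart scheduling only if it is not already active. */
	if (!intel_gvt_sched_is_active(vgpu))
		intel_vgpu_start_schedule(vgpu);
}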
drivers/gpu/drm/i915/gvt/handlers.c | 11 ++++-------
drivers/gpu/drm/i915/gvt/sched_policy.c | 11 ++++++++++-
drivers/gpu/drm/i915/gvt/sched_policy.h | 2 ++
drivers/gpu/drm/i915/gvt/scheduler.c | 26 +++++++++++++-------------
4 files changed, 29 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index befda75..3a0eee8 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu_submission *s = &vgpu->submission;
u32 data = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
@@ -1523,16 +1522,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (!enable_execlist)
return 0;
- if (s->active)
- return 0;
-
ret = intel_vgpu_select_submission_ops(vgpu,
- ALL_ENGINES,
- INTEL_VGPU_EXECLIST_SUBMISSION);
+ ENGINE_MASK(ring_id),
+ INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
- intel_vgpu_start_schedule(vgpu);
+ if (!intel_gvt_sched_is_active(vgpu))
+ intel_vgpu_start_schedule(vgpu);
}
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d031f64..c7b5645 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
struct vgpu_sched_data {
struct list_head lru_list;
struct intel_vgpu *vgpu;
+ bool active;
ktime_t sched_in_time;
ktime_t sched_out_time;
@@ -332,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
if (!hrtimer_active(&sched_data->timer))
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
sched_data->period), HRTIMER_MODE_ABS);
+ vgpu_data->active = true;
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -339,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
list_del_init(&vgpu_data->lru_list);
+ vgpu_data->active = false;
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -375,7 +378,6 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
-
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
@@ -412,3 +414,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
spin_unlock_bh(&scheduler->mmio_context_lock);
}
+
+bool intel_gvt_sched_is_active(struct intel_vgpu *vgpu)
+{
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+ return vgpu_data->active;
+}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index 7b59e3e..cb4bf02 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -59,4 +59,6 @@ struct intel_gvt_sched_policy_ops {
void intel_gvt_kick_schedule(struct intel_gvt *gvt);
+bool intel_gvt_sched_is_active(struct intel_vgpu *vgpu);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f0997a2..12a65c9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1092,17 +1092,17 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
if (WARN_ON(interface >= ARRAY_SIZE(ops)))
return -EINVAL;
- if (s->active) {
+ if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ return -EINVAL;
+
+ if (s->active)
s->ops->clean(vgpu, engine_mask);
- s->active = false;
- gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
- vgpu->id, s->ops->name);
- }
if (interface == 0) {
s->ops = NULL;
s->virtual_submission_interface = 0;
- gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+ s->active = false;
+ gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
return 0;
}
@@ -1110,13 +1110,13 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
if (ret)
return ret;
- s->ops = ops[interface];
- s->virtual_submission_interface = interface;
- s->active = true;
-
- gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
- vgpu->id, s->ops->name);
-
+ if (!s->active) {
+ s->ops = ops[interface];
+ s->virtual_submission_interface = interface;
+ s->active = true;
+ gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
+ vgpu->id, s->ops->name);
+ }
return 0;
}
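Usage note: after this change intel_vgpu_select_submission_ops() has
two distinct call patterns, sketched below for illustration. Interface
0 (removing the ops) is only valid together with ALL_ENGINES; a
per-engine mask cleans and re-selects only that engine while the ops
stay active.

	/* Per-engine re-select: cleans only ring_id's execlist state. */
	intel_vgpu_select_submission_ops(vgpu, ENGINE_MASK(ring_id),
					 INTEL_VGPU_EXECLIST_SUBMISSION);

	/* Full teardown: interface 0 requires ALL_ENGINES, else -EINVAL. */
	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);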
--
1.9.1