[PATCH 3/3] drm/i915/gvt: Use sched_lock to protect gvt scheduler logic.
Colin Xu
colin.xu at intel.com
Sat Mar 31 00:57:00 UTC 2018
From: Pei Zhang <pei.zhang at intel.com>
The scheduler lock (gvt->sched_lock) protects the gvt scheduler
logic, including the gvt scheduler structure (gvt->scheduler) and
the per-vGPU schedule data (vgpu->sched_data, vgpu->sched_ctl).
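With this change, complete_current_workload() takes vgpu->vgpu_lock
first and gvt->sched_lock second. A minimal sketch of that nesting
convention (example_complete_path is a made-up name for illustration;
the locks and fields are the real ones touched by this patch):

static void example_complete_path(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&vgpu->vgpu_lock);	/* per-vGPU state first */
	mutex_lock(&gvt->sched_lock);	/* then scheduler state */

	/* safe to touch vgpu->sched_data, vgpu->sched_ctl and
	 * gvt->scheduler here
	 */

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}

Taking the two locks in the opposite order elsewhere would risk an
AB-BA deadlock, so new callers should follow the same order.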
v4:
- Rebase to latest gvt-staging.

v3:
- Update to latest code base.
Signed-off-by: Pei Zhang <pei.zhang at intel.com>
Signed-off-by: Colin Xu <colin.xu at intel.com>
---
drivers/gpu/drm/i915/gvt/gvt.c | 1 +
drivers/gpu/drm/i915/gvt/gvt.h | 7 ++++++
drivers/gpu/drm/i915/gvt/sched_policy.c | 41 ++++++++++++++++++++++++++++-----
drivers/gpu/drm/i915/gvt/scheduler.c | 8 +++----
4 files changed, 47 insertions(+), 10 deletions(-)
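Note on intel_gvt_schedule(): gvt->service_request is intentionally
left outside any mutex and is only ever touched with atomic bit ops.
A hedged sketch of the resulting pattern (example_request_and_consume
is a made-up name; the bit ops and tbs_sched_func() are the ones used
in the hunk below):

static void example_request_and_consume(struct intel_gvt *gvt)
{
	/* requester side, cf. intel_gvt_request_service() */
	set_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request);

	/* consumer side, cf. intel_gvt_schedule(): the atomic
	 * test_and_clear_bit() replaces the old gvt->lock here;
	 * sched_lock only guards the scheduler data itself.
	 */
	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
			       (void *)&gvt->service_request)) {
		mutex_lock(&gvt->sched_lock);
		tbs_sched_func(gvt->scheduler.sched_data);
		mutex_unlock(&gvt->sched_lock);
	}
}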
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 129a7795cb1f..eeac956339e0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -377,6 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->gtt_lock);
+ mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 89daf0737317..580edd1069e0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -178,6 +178,11 @@ struct intel_vgpu {
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
+
+ /* Both sched_data and sched_ctl can be seen as part of the global
+ * gvt scheduler structure, so these two vGPU fields are protected
+ * by sched_lock rather than vgpu_lock.
+ */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -301,6 +306,8 @@ struct intel_gvt {
*/
struct mutex lock;
struct mutex gtt_lock;
+ /* scheduler scope lock, protects gvt and vgpu schedule-related data */
+ struct mutex sched_lock;
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 75b7bc7b344c..c5dfd4b30485 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,8 +228,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
static uint64_t timer_check;
- mutex_lock(&gvt->lock);
-
+ /* Don't use an external lock to protect service_request.
+ * Instead, always access it with atomic bit ops.
+ */
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request)) {
if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
@@ -237,9 +238,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
}
clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
+ mutex_lock(&gvt->sched_lock);
tbs_sched_func(sched_data);
-
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -354,39 +355,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
+ int ret;
+
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops = &tbs_schedule_ops;
+ ret = gvt->scheduler.sched_ops->init(gvt);
+ mutex_unlock(&gvt->sched_lock);
- return gvt->scheduler.sched_ops->init(gvt);
+ return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops->clean(gvt);
+ mutex_unlock(&gvt->sched_lock);
}
+/* For per-vGPU scheduler policy there are two per-vGPU fields:
+ * sched_data and sched_ctl. We treat them as part of the global
+ * scheduler, so both are protected by gvt->sched_lock, not
+ * vgpu_lock. It is up to the caller to decide whether vgpu_lock
+ * should be held outside.
+ */
+
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
- return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ int ret;
+
+ mutex_lock(&vgpu->gvt->sched_lock);
+ ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
+
+ return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->sched_lock);
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ mutex_lock(&vgpu->gvt->sched_lock);
if (!vgpu_data->active) {
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -401,6 +428,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+ mutex_lock(&vgpu->gvt->sched_lock);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
@@ -420,4 +448,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 348464badac0..4c5de77f8c70 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -693,7 +693,7 @@ static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
/*
* no current vgpu / will be scheduled out / no workload
@@ -739,7 +739,7 @@ static struct intel_vgpu_workload *pick_next_workload(
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
return workload;
}
@@ -840,8 +840,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
struct intel_vgpu_submission *s = &vgpu->submission;
int event;
- mutex_lock(&gvt->lock);
mutex_lock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->sched_lock);
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
@@ -919,8 +919,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
mutex_unlock(&vgpu->vgpu_lock);
- mutex_unlock(&gvt->lock);
}
struct workload_thread_param {
--
2.16.3