[PATCH v2 4/9] drm/i915/gvt: add statistics routines for the scheduler
Ping Gao
ping.a.gao at intel.com
Tue Feb 14 04:25:50 UTC 2017
Add statistics routines to collect the time when a vGPU is
scheduled in/out and the time of the last ctx submission.
Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
drivers/gpu/drm/i915/gvt/gvt.h | 7 ++++++
drivers/gpu/drm/i915/gvt/handlers.c | 1 +
drivers/gpu/drm/i915/gvt/sched_policy.c | 43 +++++++++++++++++++++++----------
3 files changed, 38 insertions(+), 13 deletions(-)
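
[Note: a minimal sketch, assuming only the tbs_vgpu_data fields added
below; the helper name vgpu_update_sched_time() is hypothetical and not
part of this patch. It illustrates how the sched_in_time/sched_out_time
stamps could be folded into the per-vGPU sched_time accumulator on each
schedule-out:

/*
 * Hypothetical helper (illustration only): fold the cycles a vGPU
 * spent on hardware into its sched_time accumulator when it is
 * scheduled out, using the stamps recorded by this patch.
 */
static void vgpu_update_sched_time(struct tbs_vgpu_data *vgpu_data,
				   cycles_t cur_cycles)
{
	vgpu_data->sched_out_time = cur_cycles;
	vgpu_data->sched_time +=
		vgpu_data->sched_out_time - vgpu_data->sched_in_time;
}
]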
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 78cbf3c..8c50230 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -138,6 +138,12 @@ struct intel_vgpu_display {
 	struct intel_vgpu_sbi sbi;
 };
 
+struct intel_sched_ctl {
+	int cap;
+	int weight;
+	int priority;
+};
+
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	int id;
@@ -158,6 +164,7 @@ struct intel_vgpu {
 	struct list_head workload_q_head[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
+	cycles_t last_ctx_submit_time;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
 	struct notifier_block shadow_ctx_notifier_block;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index ab2ea15..70a54c4 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1300,6 +1300,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
+		vgpu->last_ctx_submit_time = get_cycles();
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
 			gvt_err("fail submit workload on ring %d\n", ring_id);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index ff6e104..6c53bf0 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,33 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+struct tbs_vgpu_data {
+	struct list_head list;
+	struct intel_vgpu *vgpu;
+	/* put some per-vgpu sched stats here */
+	int64_t sched_in_time;
+	int64_t sched_out_time;
+	int64_t sched_time;
+	int64_t ts_usage;
+	int64_t ts_alloc;
+
+	struct intel_sched_ctl sched_ctl;
+};
+
+struct tbs_sched_data {
+	struct intel_gvt *gvt;
+	struct hrtimer timer;
+	unsigned long period;
+	struct list_head runq_head;
+};
+
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
+	struct tbs_vgpu_data *vgpu_data;
+	cycles_t cur_cycles;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -77,6 +99,14 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	gvt_dbg_sched("switch to next vgpu %d\n",
 			scheduler->next_vgpu->id);
 
+	cur_cycles = get_cycles();
+	if (scheduler->current_vgpu) {
+		vgpu_data = scheduler->current_vgpu->sched_data;
+		vgpu_data->sched_out_time = cur_cycles;
+	}
+	vgpu_data = scheduler->next_vgpu->sched_data;
+	vgpu_data->sched_in_time = cur_cycles;
+
 	/* switch current vgpu */
 	scheduler->current_vgpu = scheduler->next_vgpu;
 	scheduler->next_vgpu = NULL;
@@ -88,19 +118,6 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 		wake_up(&scheduler->waitq[i]);
 }
 
-struct tbs_vgpu_data {
-	struct list_head list;
-	struct intel_vgpu *vgpu;
-	/* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
-	struct intel_gvt *gvt;
-	struct hrtimer timer;
-	unsigned long period;
-	struct list_head runq_head;
-};
-
 #define GVT_DEFAULT_TIME_SLICE 1000000
 
 static void tbs_sched_func(struct tbs_sched_data *sched_data)
--
2.7.4
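
[Note: for illustration only, one hypothetical way a scheduler policy
could consume the new last_ctx_submit_time stamp together with
vgpu_has_pending_workload() from sched_policy.c; vgpu_is_idle() and the
idle_threshold parameter are assumptions, not part of this series:

/*
 * Hypothetical idle test (illustration only): a vGPU with no pending
 * workload that has not written ELSP for more than idle_threshold
 * cycles could be skipped when picking the next vGPU to run.
 */
static bool vgpu_is_idle(struct intel_vgpu *vgpu, cycles_t now,
			 cycles_t idle_threshold)
{
	return !vgpu_has_pending_workload(vgpu) &&
	       (now - vgpu->last_ctx_submit_time) > idle_threshold;
}
]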