[PATCH 1/8] drm/i915/gvt: Factor out vGPU workload creation/destroy
Zhi Wang
zhi.a.wang at intel.com
Thu Sep 21 17:52:48 UTC 2017
Factor out the vGPU workload creation/destruction functions, since they
are not specific to execlist emulation.
Signed-off-by: Zhi Wang <zhi.a.wang at intel.com>
---
drivers/gpu/drm/i915/gvt/execlist.c | 27 +++++--------------
drivers/gpu/drm/i915/gvt/scheduler.c | 51 ++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/scheduler.h | 5 ++++
3 files changed, 62 insertions(+), 21 deletions(-)
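For reference, a minimal sketch of how a non-execlist submission path
could use the new pair. my_submit() and prepare_and_queue() are
hypothetical names for illustration; only intel_vgpu_create_workload()
and intel_vgpu_destroy_workload() are introduced by this patch, and the
error handling mirrors the updated submit_context() below:

	/* assumes the usual gvt.h / scheduler.h context */
	static int my_submit(struct intel_vgpu *vgpu, int ring_id)
	{
		struct intel_vgpu_workload *workload;
		int ret;

		workload = intel_vgpu_create_workload(vgpu);
		if (IS_ERR(workload))
			return PTR_ERR(workload);

		workload->ring_id = ring_id;
		/* ... fill in ring/context state and callbacks ... */

		ret = prepare_and_queue(workload); /* hypothetical helper */
		if (ret) {
			/* on failure the caller still owns the workload */
			intel_vgpu_destroy_workload(workload);
			return ret;
		}
		return 0;
	}

Note that on a failed submit the caller still owns the workload and is
responsible for destroying it, which matches the submit_context() error
path in this patch.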
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 7cf013a..70eb5e6 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -358,12 +358,6 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
return 0;
}
-static void free_workload(struct intel_vgpu_workload *workload)
-{
- intel_gvt_mm_unreference(workload->shadow_mm);
- kmem_cache_free(workload->vgpu->submission.workloads, workload);
-}
-
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
@@ -586,7 +580,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
intel_vgpu_unpin_mm(workload->shadow_mm);
- free_workload(workload);
+ intel_vgpu_destroy_workload(workload);
return ret;
}
@@ -687,10 +681,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
- workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
- if (!workload)
- return -ENOMEM;
-
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
@@ -699,13 +689,10 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
- INIT_LIST_HEAD(&workload->list);
- INIT_LIST_HEAD(&workload->shadow_bb);
-
- init_waitqueue_head(&workload->shadow_ctx_status_wq);
- atomic_set(&workload->shadow_ctx_active, 0);
+ workload = intel_vgpu_create_workload(vgpu);
+ if (IS_ERR(workload))
+ return PTR_ERR(workload);
- workload->vgpu = vgpu;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
@@ -715,9 +702,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
- workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
- workload->shadowed = false;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -765,7 +750,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
if (ret == 0)
queue_workload(workload);
else {
- free_workload(workload);
+ intel_vgpu_destroy_workload(workload);
if (vgpu_is_vm_unhealthy(ret)) {
intel_vgpu_clean_execlist(vgpu);
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
@@ -854,7 +839,7 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
- free_workload(pos);
+ intel_vgpu_destroy_workload(pos);
}
clear_bit(engine->id, s->shadow_ctx_desc_updated);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c8a676..30771ab 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -782,3 +782,54 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_gem_context_put(s->shadow_ctx);
return ret;
}
+
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+ if (workload->shadow_mm)
+ intel_gvt_mm_unreference(workload->shadow_mm);
+
+ kmem_cache_free(s->workloads, workload);
+}
+
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * A pointer to the new workload on success, or an ERR_PTR-encoded
+ * negative error code on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_workload *workload;
+
+ workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+ if (!workload)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->status = -EINPROGRESS;
+ workload->shadowed = false;
+ workload->vgpu = vgpu;
+
+ return workload;
+}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c6e47ef..9435d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -140,5 +140,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu);
+
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif
--
2.7.4