[PATCH] drm/i915/gvt: invalidate PPGTT mm of vGPU in all engines reset
Weinan Li
weinan.z.li at intel.com
Fri Sep 22 04:50:53 UTC 2017
Invalidate all PPGTT mm objects of a vGPU when an all-engines reset is
triggered. After the reset the VM may reuse this memory for other
purposes without sending a destroy notification, so let GVT-g re-shadow
the page tables automatically when they are next used.
Signed-off-by: Weinan Li <weinan.z.li at intel.com>
Cc: Zhi Wang <zhi.a.wang at intel.com>
---
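Note (not part of the patch): below is a small standalone userspace mock
that sketches the idea described above, namely that a full reset only
marks the shadow state invalid and the shadow page table is recreated
lazily the next time the mm is pinned for a workload. All names here
(mock_mm, mock_invalidate, mock_pin_mm) are hypothetical; this is an
illustration of the flow, not GVT code.

/* Userspace mock, not GVT code: illustrates "invalidate on reset,
 * re-shadow lazily on next pin". Names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_mm {
	bool shadowed;	/* is the shadow page table currently valid? */
};

/* Analogue of invalidate_mm(): drop the shadow state on full reset. */
static void mock_invalidate(struct mock_mm *mm)
{
	if (mm->shadowed) {
		printf("invalidate shadow page table\n");
		mm->shadowed = false;
	}
}

/* Analogue of intel_vgpu_pin_mm(): re-shadow on demand before use. */
static int mock_pin_mm(struct mock_mm *mm)
{
	if (!mm->shadowed) {
		printf("re-shadowing guest page table\n");
		mm->shadowed = true;
	}
	return 0;
}

int main(void)
{
	struct mock_mm mm = { .shadowed = true };

	mock_invalidate(&mm);	/* all-engines / device-model reset */
	mock_pin_mm(&mm);	/* next workload: shadow recreated lazily */
	return 0;
}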
drivers/gpu/drm/i915/gvt/gtt.c | 17 +++++++++++++++--
drivers/gpu/drm/i915/gvt/gtt.h | 1 +
drivers/gpu/drm/i915/gvt/scheduler.c | 7 ++++++-
drivers/gpu/drm/i915/gvt/vgpu.c | 2 +-
4 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 73746b2..aefacb5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2038,6 +2038,21 @@ static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
}
}
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ list_del_init(&mm->lru_list);
+ if (mm->has_shadow_page_table && mm->shadowed)
+ invalidate_mm(mm);
+ }
+ }
+}
+
/**
* intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
* @vgpu: a vGPU
@@ -2340,8 +2355,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
int i;
- ppgtt_free_all_shadow_page(vgpu);
-
/* Shadow pages are only created when there is no page
* table tracking data, so remove page tracking data after
* removing the shadow pages.
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 159dce8..cb12a56 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -209,6 +209,7 @@ struct intel_vgpu_gtt {
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1cdbe92..a61dd92 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -112,9 +112,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(rcs_indirect_ctx_offset);
}
#undef COPY_REG
-
+ /*
+ * Pin/unpin the shadow mm around its use here to ensure its shadow
+ * page table has been (re)created before the PDP root pointers are set.
+ */
+ intel_vgpu_pin_mm(workload->shadow_mm);
set_context_pdp_root_pointer(shadow_ring_context,
workload->shadow_mm->shadow_page_table);
+ intel_vgpu_unpin_mm(workload->shadow_mm);
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index fe65fff..2ed8aaf 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -506,7 +506,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
-
+ intel_vgpu_invalidate_ppgtt(vgpu);
/*fence will not be reset during virtual reset */
if (dmlr) {
intel_vgpu_reset_gtt(vgpu);
--
1.9.1