[PATCH v2 18/22] drm/i915/gvt: Add 2M huge gtt support

changbin.du at intel.com changbin.du at intel.com
Wed Dec 20 09:39:23 UTC 2017


From: Changbin Du <changbin.du at intel.com>

This adds 2M huge gtt support for GVT-g. Unlike the 64K entry, we can
shadow a 2M guest entry with a real huge gtt entry. But before that, we
have to check that the guest memory is physically contiguous and
properly aligned, and that 2M pages are supported on the host. We can
get all supported page sizes from intel_device_info.page_sizes.

We check the guest physical memory contiguity by comparing the PFNs
of neighboring gtt mappings.

Finally, we must split the 2M page into smaller pages if we cannot
satisfy the guest huge page.

Signed-off-by: Changbin Du <changbin.du at intel.com>
---
 drivers/gpu/drm/i915/gvt/gtt.c | 106 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 103 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2064b31..70601f3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -887,6 +887,11 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
 		if (!ppgtt_get_shadow_entry(spt, e, i) && \
 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
+#define for_each_shadow_entry(spt, e, i) \
+	for (i = 0; i < pt_entries(spt); \
+	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+		if (!ppgtt_get_shadow_entry(spt, e, i))
+
 static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	int v = atomic_read(&spt->refcount);
@@ -952,8 +957,10 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 			WARN(1, "suspicious 64K gtt entry\n");
 			continue;
 		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+			gvt_vdbg_mm("invalidate 2M entry\n");
+			continue;
 		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-			WARN(1, "GVT doesn't support 64K/2M/1GB page\n");
+			WARN(1, "GVT doesn't support 1GB page\n");
 			continue;
 		case GTT_TYPE_PPGTT_PML4_ENTRY:
 		case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -1053,6 +1060,31 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 	ops->set_pfn(se, s->shadow_page.mfn);
 }
 
+static int check_guest_pmem_continuity(struct intel_vgpu *vgpu,
+	unsigned long start_gfn, int nr_pages)
+{
+	/* 1 = contiguous and aligned, 0 = not, -ENXIO = unmapped gfn. */
+	unsigned long mfn, last_mfn = INTEL_GVT_INVALID_ADDR;
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, start_gfn + i);
+		if (mfn == INTEL_GVT_INVALID_ADDR)
+			return -ENXIO;
+
+		/* First mfn must be nr_pages-aligned for a huge mapping. */
+		if (i == 0 && (mfn & (nr_pages - 1)))
+			return 0;
+
+		/* Skip continuity check at i == 0: last_mfn is invalid. */
+		if (i != 0 && last_mfn + 1 != mfn)
+			return 0;
+
+		last_mfn = mfn;
+	}
+	return 1;
+}
+
 /**
  * Return 1 if huge gtt shadowing is possilbe, 0 if miscondition,
  * negtive if found err.
@@ -1060,6 +1092,10 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int is_huge_gtt_possible(struct intel_vgpu *vgpu,
 	struct intel_gvt_gtt_entry *entry)
 {
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	unsigned long page_size;
+	int nr_pages;
+
 	switch (entry->type) {
 	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 		/**
@@ -1068,14 +1104,65 @@ static int is_huge_gtt_possible(struct intel_vgpu *vgpu,
 		 */
 		return 0;
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
-		/* Will add later. */
+		nr_pages = GEN8_PTES;
+		page_size = I915_GTT_PAGE_SIZE_2M;
+		break;
 	default:
 		return 0;
 	}
 
+	if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, page_size))
+		return 0;
+
+	return check_guest_pmem_continuity(vgpu, ops->get_pfn(entry), nr_pages);
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+	struct intel_gvt_gtt_entry *se)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_vgpu_ppgtt_spt *sub_spt;
+	struct intel_gvt_gtt_entry sub_se;
+	unsigned long start_gfn, mfn;
+	unsigned long sub_index;
+	/* Split one 2M entry into 4K sub-entries backed by sub_spt. */
+	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+	start_gfn = ops->get_pfn(se);
+
+	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+	if (IS_ERR(sub_spt))
+		return PTR_ERR(sub_spt);
+
+	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+		mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu,
+					start_gfn + sub_index);
+		if (mfn == INTEL_GVT_INVALID_ADDR) {
+			ppgtt_free_spt(sub_spt); /* free new sub_spt, not parent */
+			return -ENXIO;
+		}
+		sub_se.val64 = se->val64;
+
+		/* Copy the PAT field from PDE. */
+		sub_se.val64 &= ~_PAGE_PAT;
+		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+		ops->set_pfn(&sub_se, mfn);
+		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+	}
+
+	/* Clear dirty field. */
+	se->val64 &= ~_PAGE_DIRTY;
+
+	ops->clear_pse(se);
+	ops->clear_ips(se);
+	ops->set_pfn(se, sub_spt->shadow_page.mfn);
+	ppgtt_set_shadow_entry(spt, se, index);
+	return 0;
+}
 
+
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *se)
@@ -1132,8 +1219,15 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 			return -EINVAL;
 		break;
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+		gvt_vdbg_mm("shadow 2M gtt entry\n");
+		ret = is_huge_gtt_possible(vgpu, ge);
+		if (ret == 0)
+			return split_2MB_gtt_entry(vgpu, spt, index, &se);
+		else if (ret < 0)
+			return -EINVAL;
+		break;
 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-		gvt_vgpu_err("GVT doesn't support 64K/2M/1GB entry\n");
+		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
@@ -1472,6 +1566,12 @@ static int ppgtt_handle_guest_write_page_table(
 					vgpu->gtt.scratch_pt[type].page_mfn);
 				ppgtt_set_shadow_entry(spt, &old_se, index + i);
 			}
+		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+			ops->clear_pse(&old_se);
+			ops->set_pfn(&old_se,
+				     vgpu->gtt.scratch_pt[type].page_mfn);
+			ppgtt_set_shadow_entry(spt, &old_se, index);
 		} else {
 			ops->set_pfn(&old_se,
 				     vgpu->gtt.scratch_pt[type].page_mfn);
-- 
2.7.4



More information about the intel-gvt-dev mailing list