[PATCH v3][Security Check] drm/i915/gvt: remap GTT PTE space as uncached in GVT

Weinan Li weinan.z.li at intel.com
Wed Nov 1 05:37:14 UTC 2017


To avoid doing a flush on every PTE entry update from a vGPU, remap the GTT
space as uncached, saving the time cost of GTT updates from the vGPU.

v2,v3: add cleanup of uc_gsm

Signed-off-by: Weinan Li <weinan.z.li at intel.com>
Cc: Zhi Wang <zhi.a.wang at intel.com>
---
 drivers/gpu/drm/i915/gvt/gtt.c | 69 +++++++++++++++++++++++++++---------------
 drivers/gpu/drm/i915/gvt/gtt.h |  2 ++
 2 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6fa9271..64e5096 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -257,24 +257,17 @@ static inline int get_pse_type(int type)
 	return gtt_type_table[type].pse_entry_type;
 }
 
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct intel_gvt *gvt, unsigned long index)
 {
-	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	void __iomem *addr = (gen8_pte_t __iomem *)gvt->gtt.uc_gsm + index;
 
 	return readq(addr);
 }
 
-static void gtt_invalidate(struct drm_i915_private *dev_priv)
-{
-	mmio_hw_access_pre(dev_priv);
-	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	mmio_hw_access_post(dev_priv);
-}
-
-static void write_pte64(struct drm_i915_private *dev_priv,
+static void write_pte64(struct intel_gvt *gvt,
 		unsigned long index, u64 pte)
 {
-	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	void __iomem *addr = (gen8_pte_t __iomem *)gvt->gtt.uc_gsm + index;
 
 	writeq(pte, addr);
 }
@@ -297,7 +290,7 @@ static inline int gtt_get_entry64(void *pt,
 		if (WARN_ON(ret))
 			return ret;
 	} else if (!pt) {
-		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+		e->val64 = read_pte64(vgpu->gvt, index);
 	} else {
 		e->val64 = *((u64 *)pt + index);
 	}
@@ -322,7 +315,7 @@ static inline int gtt_set_entry64(void *pt,
 		if (WARN_ON(ret))
 			return ret;
 	} else if (!pt) {
-		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+		write_pte64(vgpu->gvt, index, e->val64);
 	} else {
 		*((u64 *)pt + index) = e->val64;
 	}
@@ -1975,7 +1968,6 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
-	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 	return 0;
 }
@@ -2324,11 +2316,29 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
 	void *page;
-	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *dev = &pdev->dev;
 	dma_addr_t daddr;
+	phys_addr_t phys_addr;
+	unsigned int size;
 
 	gvt_dbg_core("init gtt\n");
 
+	/*
+	 * Remap the PTE space as uncached for use by the GVT-g host, so
+	 * the host does not need to flush the TLB on every PTE entry
+	 * update; this saves time in the vGPU reset process.
+	 */
+	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+	size = (dev_priv->ggtt.base.total >> PAGE_SHIFT) * sizeof(gen8_pte_t);
+	gvt->gtt.uc_gsm = ioremap_nocache(phys_addr, size);
+	if (!gvt->gtt.uc_gsm) {
+		gvt_err("fail to remap ggtt page table\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
 		|| IS_KABYLAKE(gvt->dev_priv)) {
 		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
@@ -2336,21 +2346,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
 		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
 	} else {
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_iounmap;
 	}
 
 	page = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!page) {
 		gvt_err("fail to allocate scratch ggtt page\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_iounmap;
 	}
 
 	daddr = dma_map_page(dev, virt_to_page(page), 0,
 			4096, PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
 		gvt_err("fail to dmamap scratch ggtt page\n");
-		__free_page(virt_to_page(page));
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_free_page;
 	}
 
 	gvt->gtt.scratch_page = virt_to_page(page);
@@ -2360,13 +2372,21 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		ret = setup_spt_oos(gvt);
 		if (ret) {
 			gvt_err("fail to initialize SPT oos\n");
-			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
-			__free_page(gvt->gtt.scratch_page);
-			return ret;
+			goto err_dma_unmap;
 		}
 	}
 	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
 	return 0;
+
+err_dma_unmap:
+	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+err_free_page:
+	__free_page(virt_to_page(page));
+err_iounmap:
+	iounmap(gvt->gtt.uc_gsm);
+err:
+	gvt_err("init gtt fail with err:%d\n", ret);
+	return ret;
 }
 
 /**
@@ -2389,6 +2409,8 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
+
+	iounmap(gvt->gtt.uc_gsm);
 }
 
 /**
@@ -2402,7 +2424,6 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	u32 index;
 	u32 offset;
@@ -2423,8 +2444,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
 	for (offset = 0; offset < num_entries; offset++)
 		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
-
-	gtt_invalidate(dev_priv);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 416b2f8..985271e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,8 @@ struct intel_gvt_gtt {
 
 	struct page *scratch_page;
 	unsigned long scratch_mfn;
+
+	void __iomem *uc_gsm;
 };
 
 enum {
-- 
1.9.1



More information about the intel-gvt-dev mailing list