[PATCH v2 01/22] drm/i915/gvt: Rework shadow graphic memory management code

changbin.du at intel.com changbin.du at intel.com
Wed Dec 20 09:39:06 UTC 2017


From: Changbin Du <changbin.du at intel.com>

This is a big one: the GVT shadow graphic memory management code is
heavily refined. The new code is more straightforward and smaller.

The struct intel_vgpu_mm is restructured to be clearly defined, with
accurate field names, and the fields that were really redundant are
removed.
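
For reference, the reshaped struct (copied from the gtt.h hunk below)
keeps the per-type state in an anonymous union:

	struct intel_vgpu_mm {
		enum intel_gvt_mm_type type;
		struct intel_vgpu *vgpu;

		struct kref ref;
		atomic_t pincount;

		union {
			struct {
				intel_gvt_gtt_type_t root_entry_type;
				u64 guest_pdps[GEN8_3LVL_PDPES];
				u64 shadow_pdps[GEN8_3LVL_PDPES];
				bool shadowed;

				struct list_head list;
				struct list_head lru_list;
			} ppgtt_mm;
			struct {
				void *virtual_ggtt;
			} ggtt_mm;
		};
	};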

Now we only manage ppgtt mm objects with mm->ppgtt_mm.lru_list. There is
no need to mix ppgtt and ggtt together, since one vGPU has only one ggtt
object.

The reference counting of mm objects is unified behind a clear get/put
interface.
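
A minimal usage sketch of the new interface, mirroring the prepare_mm()
hunk in scheduler.c below:

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm)
		intel_vgpu_mm_get(mm);	/* kref_get(&mm->ref) */
	else
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
	...
	intel_vgpu_mm_put(mm);	/* drops the ref, may call _intel_vgpu_mm_release() */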

Some GEM assertions are added to verify our assumptions; they can catch
bugs during development.
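
For example, the MM helpers now assert the mm type up front; GEM_BUG_ON()
compiles to a no-op unless CONFIG_DRM_I915_DEBUG_GEM is enabled, so this
costs nothing in production builds:

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);	/* in the ggtt helpers */
	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);	/* in the ppgtt helpers */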

There are also some other refinements, but they are hard to split out
because I need to ensure each patch at least compiles.

Signed-off-by: Changbin Du <changbin.du at intel.com>
---
 drivers/gpu/drm/i915/gvt/gtt.c       | 626 +++++++++++++++++------------------
 drivers/gpu/drm/i915/gvt/gtt.h       | 113 +++----
 drivers/gpu/drm/i915/gvt/handlers.c  |  15 +-
 drivers/gpu/drm/i915/gvt/mmio.c      |   5 +-
 drivers/gpu/drm/i915/gvt/scheduler.c |  39 ++-
 drivers/gpu/drm/i915/gvt/trace.h     |   8 +-
 6 files changed, 379 insertions(+), 427 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 71a0f2b..003c5e8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -473,32 +473,88 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 /*
  * MM helpers.
  */
-int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index)
+static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index,
+		bool guest)
 {
-	struct intel_gvt *gvt = mm->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
-	int ret;
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
-	e->type = mm->page_table_entry_type;
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
 
-	ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
-	if (ret)
-		return ret;
+	entry->type = mm->ppgtt_mm.root_entry_type;
+	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
+			   mm->ppgtt_mm.shadow_pdps,
+			   entry, index, false, 0, mm->vgpu);
 
-	ops->test_pse(e);
-	return 0;
+	pte_ops->test_pse(entry);
 }
 
-int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index)
+static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
-	struct intel_gvt *gvt = mm->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	_ppgtt_get_root_entry(mm, entry, index, true);
+}
+
+static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_get_root_entry(mm, entry, index, false);
+}
+
+static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index,
+		bool guest)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
+			   mm->ppgtt_mm.shadow_pdps,
+			   entry, index, false, 0, mm->vgpu);
+}
+
+static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_set_root_entry(mm, entry, index, true);
+}
+
+static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_set_root_entry(mm, entry, index, false);
+}
 
-	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	entry->type = GTT_TYPE_GGTT_PTE;
+	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+			   false, 0, mm->vgpu);
+}
+
+static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+			   false, 0, mm->vgpu);
+}
+
+static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
 }
 
 /*
@@ -510,8 +566,7 @@ static inline int ppgtt_spt_get_entry(
 		struct intel_gvt_gtt_entry *e, unsigned long index,
 		bool guest)
 {
-	struct intel_gvt *gvt = spt->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_pte_ops *ops = spt->vgpu->gvt->gtt.pte_ops;
 	int ret;
 
 	e->type = get_entry_type(type);
@@ -535,8 +590,7 @@ static inline int ppgtt_spt_set_entry(
 		struct intel_gvt_gtt_entry *e, unsigned long index,
 		bool guest)
 {
-	struct intel_gvt *gvt = spt->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_pte_ops *ops = spt->vgpu->gvt->gtt.pte_ops;
 
 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
 		return -EINVAL;
@@ -782,7 +836,7 @@ static int ppgtt_write_protection_handler(void *data, u64 pa,
 	return ret;
 }
 
-static int reclaim_one_mm(struct intel_gvt *gvt);
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
 		struct intel_vgpu *vgpu, int type, unsigned long gfn)
@@ -793,7 +847,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
 retry:
 	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
 	if (!spt) {
-		if (reclaim_one_mm(vgpu->gvt))
+		if (reclaim_one_ppgtt_mm(vgpu->gvt))
 			goto retry;
 
 		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
@@ -1439,111 +1493,37 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 	return 0;
 }
 
-/*
- * mm page table allocation policy for bdw+
- *  - for ggtt, only virtual page table will be allocated.
- *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
- */
-static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
-{
-	struct intel_vgpu *vgpu = mm->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
-	const struct intel_gvt_device_info *info = &gvt->device_info;
-	void *mem;
-
-	if (mm->type == INTEL_GVT_MM_PPGTT) {
-		mm->page_table_entry_cnt = 4;
-		mm->page_table_entry_size = mm->page_table_entry_cnt *
-			info->gtt_entry_size;
-		mem = kzalloc(mm->has_shadow_page_table ?
-			mm->page_table_entry_size * 2
-				: mm->page_table_entry_size, GFP_KERNEL);
-		if (!mem)
-			return -ENOMEM;
-		mm->virtual_page_table = mem;
-		if (!mm->has_shadow_page_table)
-			return 0;
-		mm->shadow_page_table = mem + mm->page_table_entry_size;
-	} else if (mm->type == INTEL_GVT_MM_GGTT) {
-		mm->page_table_entry_cnt =
-			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
-		mm->page_table_entry_size = mm->page_table_entry_cnt *
-			info->gtt_entry_size;
-		mem = vzalloc(mm->page_table_entry_size);
-		if (!mem)
-			return -ENOMEM;
-		mm->virtual_page_table = mem;
-	}
-	return 0;
-}
-
-static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
-{
-	if (mm->type == INTEL_GVT_MM_PPGTT) {
-		kfree(mm->virtual_page_table);
-	} else if (mm->type == INTEL_GVT_MM_GGTT) {
-		if (mm->virtual_page_table)
-			vfree(mm->virtual_page_table);
-	}
-	mm->virtual_page_table = mm->shadow_page_table = NULL;
-}
-
-static void invalidate_mm(struct intel_vgpu_mm *mm)
+static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
 {
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_gtt *gtt = &gvt->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
 	struct intel_gvt_gtt_entry se;
-	int i;
+	int index;
 
-	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+	if (!mm->ppgtt_mm.shadowed)
 		return;
 
-	for (i = 0; i < mm->page_table_entry_cnt; i++) {
-		ppgtt_get_shadow_root_entry(mm, &se, i);
+	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
+		ppgtt_get_shadow_root_entry(mm, &se, index);
+
 		if (!ops->test_present(&se))
 			continue;
-		ppgtt_invalidate_shadow_page_by_shadow_entry(
-				vgpu, &se);
+
+		ppgtt_invalidate_shadow_page_by_shadow_entry(vgpu, &se);
 		se.val64 = 0;
-		ppgtt_set_shadow_root_entry(mm, &se, i);
+		ppgtt_set_shadow_root_entry(mm, &se, index);
 
 		trace_gpt_change(vgpu->id, "destroy root pointer",
-				NULL, se.type, se.val64, i);
+				 NULL, se.type, se.val64, index);
 	}
-	mm->shadowed = false;
-}
-
-/**
- * intel_vgpu_destroy_mm - destroy a mm object
- * @mm: a kref object
- *
- * This function is used to destroy a mm object for vGPU
- *
- */
-void intel_vgpu_destroy_mm(struct kref *mm_ref)
-{
-	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
-	struct intel_vgpu *vgpu = mm->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt *gtt = &gvt->gtt;
-
-	if (!mm->initialized)
-		goto out;
 
-	list_del(&mm->list);
-	list_del(&mm->lru_list);
-
-	if (mm->has_shadow_page_table)
-		invalidate_mm(mm);
-
-	gtt->mm_free_page_table(mm);
-out:
-	kfree(mm);
+	mm->ppgtt_mm.shadowed = false;
 }
 
-static int shadow_mm(struct intel_vgpu_mm *mm)
+
+static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
 {
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -1551,21 +1531,21 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
 	struct intel_vgpu_ppgtt_spt *spt;
 	struct intel_gvt_gtt_entry ge, se;
-	int i;
-	int ret;
+	int index, ret;
 
-	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+	if (mm->ppgtt_mm.shadowed)
 		return 0;
 
-	mm->shadowed = true;
+	mm->ppgtt_mm.shadowed = true;
+
+	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
+		ppgtt_get_guest_root_entry(mm, &ge, index);
 
-	for (i = 0; i < mm->page_table_entry_cnt; i++) {
-		ppgtt_get_guest_root_entry(mm, &ge, i);
 		if (!ops->test_present(&ge))
 			continue;
 
 		trace_gpt_change(vgpu->id, __func__, NULL,
-				ge.type, ge.val64, i);
+				ge.type, ge.val64, index);
 
 		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
 		if (IS_ERR(spt)) {
@@ -1574,96 +1554,128 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 			goto fail;
 		}
 		ppgtt_generate_shadow_entry(&se, spt, &ge);
-		ppgtt_set_shadow_root_entry(mm, &se, i);
+		ppgtt_set_shadow_root_entry(mm, &se, index);
 
 		trace_gpt_change(vgpu->id, "populate root pointer",
-				NULL, se.type, se.val64, i);
+				NULL, se.type, se.val64, index);
 	}
+
 	return 0;
 fail:
-	invalidate_mm(mm);
+	invalidate_ppgtt_mm(mm);
 	return ret;
 }
 
+static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_mm *mm;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return NULL;
+
+	mm->vgpu = vgpu;
+	kref_init(&mm->ref);
+	atomic_set(&mm->pincount, 0);
+
+	return mm;
+}
+
+static void vgpu_free_mm(struct intel_vgpu_mm *mm)
+{
+	kfree(mm);
+}
+
 /**
- * intel_vgpu_create_mm - create a mm object for a vGPU
+ * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
  * @vgpu: a vGPU
- * @mm_type: mm object type, should be PPGTT or GGTT
- * @virtual_page_table: page table root pointers. Could be NULL if user wants
- *	to populate shadow later.
- * @page_table_level: describe the page table level of the mm object
- * @pde_base_index: pde root pointer base in GGTT MMIO.
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps.
  *
- * This function is used to create a mm object for a vGPU.
+ * This function is used to create a ppgtt mm object for a vGPU.
  *
  * Returns:
  * Zero on success, negative error code in pointer if failed.
  */
-struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
-		int mm_type, void *virtual_page_table, int page_table_level,
-		u32 pde_base_index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[GEN8_3LVL_PDPES])
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt *gtt = &gvt->gtt;
 	struct intel_vgpu_mm *mm;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	mm = vgpu_alloc_mm(vgpu);
+	if (!mm)
+		return ERR_PTR(-ENOMEM);
 
-	mm->type = mm_type;
+	mm->type = INTEL_GVT_MM_PPGTT;
 
-	if (page_table_level == 1)
-		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
-	else if (page_table_level == 3)
-		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
-	else if (page_table_level == 4)
-		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
-	else {
-		WARN_ON(1);
-		ret = -EINVAL;
-		goto fail;
-	}
+	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
+		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
+	mm->ppgtt_mm.root_entry_type = root_entry_type;
 
-	mm->page_table_level = page_table_level;
-	mm->pde_base_index = pde_base_index;
+	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
+	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
 
-	mm->vgpu = vgpu;
-	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
+	memcpy(mm->ppgtt_mm.guest_pdps, pdps, sizeof(mm->ppgtt_mm.guest_pdps));
 
-	kref_init(&mm->ref);
-	atomic_set(&mm->pincount, 0);
-	INIT_LIST_HEAD(&mm->list);
-	INIT_LIST_HEAD(&mm->lru_list);
-	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
-
-	ret = gtt->mm_alloc_page_table(mm);
+	ret = shadow_ppgtt_mm(mm);
 	if (ret) {
-		gvt_vgpu_err("fail to allocate page table for mm\n");
-		goto fail;
+		gvt_vgpu_err("failed to shadow ppgtt mm\n");
+		vgpu_free_mm(mm);
+		return ERR_PTR(ret);
 	}
 
-	mm->initialized = true;
+	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+	return mm;
+}
+
+static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_mm *mm;
+	unsigned long nr_entries;
+
+	mm = vgpu_alloc_mm(vgpu);
+	if (!mm)
+		return ERR_PTR(-ENOMEM);
 
-	if (virtual_page_table)
-		memcpy(mm->virtual_page_table, virtual_page_table,
-				mm->page_table_entry_size);
+	mm->type = INTEL_GVT_MM_GGTT;
 
-	if (mm->has_shadow_page_table) {
-		ret = shadow_mm(mm);
-		if (ret)
-			goto fail;
-		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
+	mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
+					vgpu->gvt->device_info.gtt_entry_size);
+	if (!mm->ggtt_mm.virtual_ggtt) {
+		vgpu_free_mm(mm);
+		return ERR_PTR(-ENOMEM);
 	}
+
 	return mm;
-fail:
-	gvt_vgpu_err("fail to create mm\n");
-	if (mm)
-		intel_gvt_mm_unreference(mm);
-	return ERR_PTR(ret);
+}
+
+/**
+ * _intel_vgpu_mm_release - destroy a mm object
+ * @mm_ref: a kref object
+ *
+ * This function is used to destroy a mm object for vGPU
+ *
+ */
+void _intel_vgpu_mm_release(struct kref *mm_ref)
+{
+	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+
+	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
+		gvt_err("vgpu mm pin count bug detected\n");
+
+	if (mm->type == INTEL_GVT_MM_PPGTT) {
+		list_del(&mm->ppgtt_mm.list);
+		list_del(&mm->ppgtt_mm.lru_list);
+		invalidate_ppgtt_mm(mm);
+	} else {
+		vfree(mm->ggtt_mm.virtual_ggtt);
+	}
+
+	vgpu_free_mm(mm);
 }
 
 /**
@@ -1674,9 +1686,6 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
-		return;
-
 	atomic_dec(&mm->pincount);
 }
 
@@ -1695,36 +1704,34 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 {
 	int ret;
 
-	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
-		return 0;
+	atomic_inc(&mm->pincount);
 
-	if (!mm->shadowed) {
-		ret = shadow_mm(mm);
+	if (mm->type == INTEL_GVT_MM_PPGTT) {
+		ret = shadow_ppgtt_mm(mm);
 		if (ret)
 			return ret;
+
+		list_move_tail(&mm->ppgtt_mm.lru_list,
+			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
+
 	}
 
-	atomic_inc(&mm->pincount);
-	list_del_init(&mm->lru_list);
-	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
 	return 0;
 }
 
-static int reclaim_one_mm(struct intel_gvt *gvt)
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 {
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
-	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
-		if (mm->type != INTEL_GVT_MM_PPGTT)
-			continue;
 		if (atomic_read(&mm->pincount))
 			continue;
 
-		list_del_init(&mm->lru_list);
-		invalidate_mm(mm);
+		list_del_init(&mm->ppgtt_mm.lru_list);
+		invalidate_ppgtt_mm(mm);
 		return 1;
 	}
 	return 0;
@@ -1740,9 +1747,6 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 
-	if (WARN_ON(!mm->has_shadow_page_table))
-		return -EINVAL;
-
 	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
 	if (!s)
 		return -ENXIO;
@@ -1774,78 +1778,65 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
 	unsigned long gma_index[4];
 	struct intel_gvt_gtt_entry e;
-	int i, index;
+	int i, levels = 0;
 	int ret;
 
-	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
-		return INTEL_GVT_INVALID_ADDR;
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
+		   mm->type != INTEL_GVT_MM_PPGTT);
 
 	if (mm->type == INTEL_GVT_MM_GGTT) {
 		if (!vgpu_gmadr_is_valid(vgpu, gma))
 			goto err;
 
-		ret = ggtt_get_guest_entry(mm, &e,
-				gma_ops->gma_to_ggtt_pte_index(gma));
-		if (ret)
-			goto err;
+		ggtt_get_guest_entry(mm, &e,
+			gma_ops->gma_to_ggtt_pte_index(gma));
+
 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
 			+ (gma & ~I915_GTT_PAGE_MASK);
 
 		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
-		return gpa;
-	}
-
-	switch (mm->page_table_level) {
-	case 4:
-		ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
-		if (ret)
-			goto err;
-		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
-		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
-		gma_index[2] = gma_ops->gma_to_pde_index(gma);
-		gma_index[3] = gma_ops->gma_to_pte_index(gma);
-		index = 4;
-		break;
-	case 3:
-		ret = ppgtt_get_shadow_root_entry(mm, &e,
-				gma_ops->gma_to_l3_pdp_index(gma));
-		if (ret)
-			goto err;
-		gma_index[0] = gma_ops->gma_to_pde_index(gma);
-		gma_index[1] = gma_ops->gma_to_pte_index(gma);
-		index = 2;
-		break;
-	case 2:
-		ret = ppgtt_get_shadow_root_entry(mm, &e,
-				gma_ops->gma_to_pde_index(gma));
-		if (ret)
-			goto err;
-		gma_index[0] = gma_ops->gma_to_pte_index(gma);
-		index = 1;
-		break;
-	default:
-		WARN_ON(1);
-		goto err;
-	}
+	} else {
+		switch (mm->ppgtt_mm.root_entry_type) {
+		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+			ppgtt_get_shadow_root_entry(mm, &e, 0);
+
+			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+			gma_index[2] = gma_ops->gma_to_pde_index(gma);
+			gma_index[3] = gma_ops->gma_to_pte_index(gma);
+			levels = 4;
+			break;
+		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+			ppgtt_get_shadow_root_entry(mm, &e,
+					gma_ops->gma_to_l3_pdp_index(gma));
+
+			gma_index[0] = gma_ops->gma_to_pde_index(gma);
+			gma_index[1] = gma_ops->gma_to_pte_index(gma);
+			levels = 2;
+			break;
+		default:
+			GEM_BUG_ON(1);
+		}
 
-	/* walk into the shadow page table and get gpa from guest entry */
-	for (i = 0; i < index; i++) {
-		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
-			(i == index - 1));
-		if (ret)
-			goto err;
+		/* walk the shadow page table and get gpa from guest entry */
+		for (i = 0; i < levels; i++) {
+			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+				(i == levels - 1));
+			if (ret)
+				goto err;
 
-		if (!pte_ops->test_present(&e)) {
-			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
-			goto err;
+			if (!pte_ops->test_present(&e)) {
+				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+				goto err;
+			}
 		}
-	}
 
-	gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
-		+ (gma & ~I915_GTT_PAGE_MASK);
+		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
+					(gma & ~I915_GTT_PAGE_MASK);
+		trace_gma_translate(vgpu->id, "ppgtt", 0,
+				    mm->ppgtt_mm.root_entry_type, gma, gpa);
+	}
 
-	trace_gma_translate(vgpu->id, "ppgtt", 0,
-			mm->page_table_level, gma, gpa);
 	return gpa;
 err:
 	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
@@ -1936,7 +1927,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
-	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
 	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 	return 0;
@@ -2081,43 +2072,42 @@ static int create_scratch_page_tree(struct intel_vgpu *vgpu)
 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
-	struct intel_vgpu_mm *ggtt_mm;
 
 	hash_init(gtt->tracked_guest_page_hash_table);
 	hash_init(gtt->shadow_page_hash_table);
 
-	INIT_LIST_HEAD(&gtt->mm_list_head);
+	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
-	intel_vgpu_reset_ggtt(vgpu);
-
-	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
-			NULL, 1, 0);
-	if (IS_ERR(ggtt_mm)) {
+	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
+	if (IS_ERR(gtt->ggtt_mm)) {
 		gvt_vgpu_err("fail to create mm for ggtt.\n");
-		return PTR_ERR(ggtt_mm);
+		return PTR_ERR(gtt->ggtt_mm);
 	}
 
-	gtt->ggtt_mm = ggtt_mm;
+	intel_vgpu_reset_ggtt(vgpu);
 
 	return create_scratch_page_tree(vgpu);
 }
 
-static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 {
 	struct list_head *pos, *n;
 	struct intel_vgpu_mm *mm;
 
-	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, list);
-		if (mm->type == type) {
-			vgpu->gvt->gtt.mm_free_page_table(mm);
-			list_del(&mm->list);
-			list_del(&mm->lru_list);
-			kfree(mm);
-		}
+	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+		intel_vgpu_destroy_mm(mm);
 	}
+
+	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
+		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
+}
+
+static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
+{
+	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
 }
 
 /**
@@ -2135,8 +2125,8 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 	ppgtt_free_all_shadow_page(vgpu);
 	release_scratch_page_tree(vgpu);
 
-	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
-	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
+	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+	intel_vgpu_destroy_ggtt_mm(vgpu);
 }
 
 static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2198,32 +2188,28 @@ static int setup_spt_oos(struct intel_gvt *gvt)
  * pointer to mm object on success, NULL if failed.
  */
 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level, void *root_entry)
+		u64 pdps[GEN8_3LVL_PDPES])
 {
-	struct list_head *pos;
 	struct intel_vgpu_mm *mm;
-	u64 *src, *dst;
-
-	list_for_each(pos, &vgpu->gtt.mm_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, list);
-		if (mm->type != INTEL_GVT_MM_PPGTT)
-			continue;
-
-		if (mm->page_table_level != page_table_level)
-			continue;
+	struct list_head *pos;
 
-		src = root_entry;
-		dst = mm->virtual_page_table;
+	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
 
-		if (page_table_level == 3) {
-			if (src[0] == dst[0]
-					&& src[1] == dst[1]
-					&& src[2] == dst[2]
-					&& src[3] == dst[3])
+		switch (mm->ppgtt_mm.root_entry_type) {
+		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
 				return mm;
-		} else {
-			if (src[0] == dst[0])
+			break;
+		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0] &&
+			    pdps[1] == mm->ppgtt_mm.guest_pdps[1] &&
+			    pdps[2] == mm->ppgtt_mm.guest_pdps[2] &&
+			    pdps[3] == mm->ppgtt_mm.guest_pdps[3])
 				return mm;
+			break;
+		default:
+			GEM_BUG_ON(1);
 		}
 	}
 	return NULL;
@@ -2233,7 +2219,8 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
  * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
  * g2v notification
  * @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps
  *
  * This function is used to create a PPGTT mm object from a guest to GVT-g
  * notification.
@@ -2242,20 +2229,15 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
  * Zero on success, negative error code if failed.
  */
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level)
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[GEN8_3LVL_PDPES])
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
-	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
-		return -EINVAL;
-
-	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
 	if (mm) {
-		intel_gvt_mm_reference(mm);
+		intel_vgpu_mm_get(mm);
 	} else {
-		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
+		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
 		if (IS_ERR(mm)) {
 			gvt_vgpu_err("fail to create mm\n");
 			return PTR_ERR(mm);
@@ -2268,7 +2250,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
  * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
  * g2v notification
  * @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @pdps: guest pdps
  *
  * This function is used to create a PPGTT mm object from a guest to GVT-g
  * notification.
@@ -2277,20 +2259,16 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
  * Zero on success, negative error code if failed.
  */
 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level)
+		u64 pdps[GEN8_3LVL_PDPES])
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
-	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
-		return -EINVAL;
-
-	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
 	if (!mm) {
 		gvt_vgpu_err("fail to find ppgtt instance.\n");
 		return -EINVAL;
 	}
-	intel_gvt_mm_unreference(mm);
+	intel_vgpu_mm_put(mm);
 	return 0;
 }
 
@@ -2317,8 +2295,6 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		|| IS_KABYLAKE(gvt->dev_priv)) {
 		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
 		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
-		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
-		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
 	} else {
 		return -ENODEV;
 	}
@@ -2349,7 +2325,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 			return ret;
 		}
 	}
-	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
 	return 0;
 }
 
@@ -2387,26 +2363,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
 	u32 index;
-	u32 offset;
 	u32 num_entries;
-	struct intel_gvt_gtt_entry e;
 
-	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
-	e.type = GTT_TYPE_GGTT_PTE;
-	ops->set_pfn(&e, gvt->gtt.scratch_mfn);
-	e.val64 |= _PAGE_PRESENT;
+	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
+	pte_ops->set_present(&entry);
 
 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
-	for (offset = 0; offset < num_entries; offset++)
-		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+	while (num_entries--)
+		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
 
 	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
-	for (offset = 0; offset < num_entries; offset++)
-		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+	while (num_entries--)
+		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
 
 	gtt_invalidate(dev_priv);
 }
@@ -2427,7 +2400,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 	 * table tracking data, so remove page tracking data after
 	 * removing the shadow pages.
 	 */
-	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
-
+	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
 	intel_vgpu_reset_ggtt(vgpu);
 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index f98c1c1..38f6820 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -84,17 +84,12 @@ struct intel_gvt_gtt {
 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
-	struct list_head mm_lru_list_head;
+	struct list_head ppgtt_mm_lru_list_head;
 
 	struct page *scratch_page;
 	unsigned long scratch_mfn;
 };
 
-enum {
-	INTEL_GVT_MM_GGTT = 0,
-	INTEL_GVT_MM_PPGTT,
-};
-
 typedef enum {
 	GTT_TYPE_INVALID = -1,
 
@@ -125,66 +120,53 @@ typedef enum {
 	GTT_TYPE_MAX,
 } intel_gvt_gtt_type_t;
 
-struct intel_vgpu_mm {
-	int type;
-	bool initialized;
-	bool shadowed;
-
-	int page_table_entry_type;
-	u32 page_table_entry_size;
-	u32 page_table_entry_cnt;
-	void *virtual_page_table;
-	void *shadow_page_table;
+enum intel_gvt_mm_type {
+	INTEL_GVT_MM_GGTT,
+	INTEL_GVT_MM_PPGTT,
+};
 
-	int page_table_level;
-	bool has_shadow_page_table;
-	u32 pde_base_index;
+struct intel_vgpu_mm {
+	enum intel_gvt_mm_type type;
+	struct intel_vgpu *vgpu;
 
-	struct list_head list;
 	struct kref ref;
 	atomic_t pincount;
-	struct list_head lru_list;
-	struct intel_vgpu *vgpu;
-};
-
-extern int intel_vgpu_mm_get_entry(
-		struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index);
-
-extern int intel_vgpu_mm_set_entry(
-		struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index);
-
-#define ggtt_get_guest_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_set_guest_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
 
-#define ggtt_get_shadow_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
-
-#define ggtt_set_shadow_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+	union {
+		struct {
+			intel_gvt_gtt_type_t root_entry_type;
+			u64 guest_pdps[GEN8_3LVL_PDPES];
+			u64 shadow_pdps[GEN8_3LVL_PDPES];
+			bool shadowed;
+
+			struct list_head list;
+			struct list_head lru_list;
+		} ppgtt_mm;
+		struct {
+			void *virtual_ggtt;
+		} ggtt_mm;
+	};
+};
 
-#define ppgtt_get_guest_root_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
 
-#define ppgtt_set_guest_root_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
+{
+	kref_get(&mm->ref);
+}
 
-#define ppgtt_get_shadow_root_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+void _intel_vgpu_mm_release(struct kref *mm_ref);
 
-#define ppgtt_set_shadow_root_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
+{
+	kref_put(&mm->ref, _intel_vgpu_mm_release);
+}
 
-extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
-		int mm_type, void *virtual_page_table, int page_table_level,
-		u32 pde_base_index);
-extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
+{
+	intel_vgpu_mm_put(mm);
+}
 
 struct intel_vgpu_guest_page;
 
@@ -196,7 +178,7 @@ struct intel_vgpu_scratch_pt {
 struct intel_vgpu_gtt {
 	struct intel_vgpu_mm *ggtt_mm;
 	unsigned long active_ppgtt_mm_bitmap;
-	struct list_head mm_list_head;
+	struct list_head ppgtt_mm_list_head;
 	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
 	DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
 	atomic_t n_tracked_guest_page;
@@ -276,16 +258,6 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
 
 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
 
-static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
-{
-	kref_get(&mm->ref);
-}
-
-static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
-{
-	kref_put(&mm->ref, intel_vgpu_destroy_mm);
-}
-
 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
 
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
@@ -294,13 +266,12 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
 		unsigned long gma);
 
 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level, void *root_entry);
+		u64 pdps[]);
 
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level);
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
 
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level);
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
 
 int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index c982867..31536ce 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1139,20 +1139,27 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
+	u64 *pdps;
 	int ret = 0;
 
+	pdps = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+
 	switch (notification) {
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
-		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu,
+				GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+				pdps);
 		break;
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
-		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, pdps);
 		break;
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
-		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu,
+				GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+				pdps);
 		break;
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
-		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, pdps);
 		break;
 	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
 	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index f7227a3..c344184 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -108,10 +108,9 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
 					bytes);
-	} else if (reg_is_gtt(gvt, offset) &&
-			vgpu->gtt.ggtt_mm->virtual_page_table) {
+	} else if (reg_is_gtt(gvt, offset)) {
 		offset -= gvt->device_info.gtt_start_offset;
-		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+		pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
 		if (read)
 			memcpy(p_data, pt, bytes);
 		else
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638..23c33e6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -43,13 +43,13 @@
 
 static void set_context_pdp_root_pointer(
 		struct execlist_ring_context *ring_context,
-		u32 pdp[8])
+		u64 pdps[GEN8_3LVL_PDPES])
 {
 	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
 	int i;
 
 	for (i = 0; i < 8; i++)
-		pdp_pair[i].val = pdp[7 - i];
+		pdp_pair[i].val = ((u32 *)pdps)[7 - i];
 }
 
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #undef COPY_REG
 
 	set_context_pdp_root_pointer(shadow_ring_context,
-				     workload->shadow_mm->shadow_page_table);
+				     workload->shadow_mm->ppgtt_mm.shadow_pdps);
 
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
@@ -1131,7 +1131,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
 	if (workload->shadow_mm)
-		intel_gvt_mm_unreference(workload->shadow_mm);
+		intel_vgpu_mm_put(workload->shadow_mm);
 
 	kmem_cache_free(s->workloads, workload);
 }
@@ -1163,7 +1163,7 @@ alloc_workload(struct intel_vgpu *vgpu)
 	offsetof(struct execlist_ring_context, x)
 
 static void read_guest_pdps(struct intel_vgpu *vgpu,
-		u64 ring_context_gpa, u32 pdp[8])
+		u64 ring_context_gpa, u64 pdps[GEN8_3LVL_PDPES])
 {
 	u64 gpa;
 	int i;
@@ -1172,7 +1172,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
 
 	for (i = 0; i < 8; i++)
 		intel_gvt_hypervisor_read_gpa(vgpu,
-				gpa + i * 8, &pdp[7 - i], 4);
+				gpa + i * 8, &((u32 *)pdps)[7 - i], 4);
 }
 
 static int prepare_mm(struct intel_vgpu_workload *workload)
@@ -1180,27 +1180,30 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int page_table_level;
-	u32 pdp[8];
+	intel_gvt_gtt_type_t root_entry_type;
+	u64 pdps[GEN8_3LVL_PDPES];
 
-	if (desc->addressing_mode == 1) { /* legacy 32-bit */
-		page_table_level = 3;
-	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-		page_table_level = 4;
-	} else {
+	switch (desc->addressing_mode) {
+	case 1: /* legacy 32-bit */
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+		break;
+	case 3: /* legacy 64-bit */
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+		break;
+	default:
 		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
 		return -EINVAL;
 	}
 
-	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdps);
 
-	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, pdps);
 	if (mm) {
-		intel_gvt_mm_reference(mm);
+		intel_vgpu_mm_get(mm);
 	} else {
 
-		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
+		mm = intel_vgpu_create_ppgtt_mm(workload->vgpu, root_entry_type,
+						pdps);
 		if (IS_ERR(mm)) {
 			gvt_vgpu_err("fail to create mm object.\n");
 			return PTR_ERR(mm);
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a25115..5a060da 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
 );
 
 TRACE_EVENT(gma_translate,
-	TP_PROTO(int id, char *type, int ring_id, int pt_level,
+	TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
 		unsigned long gma, unsigned long gpa),
 
-	TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+	TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
 
 	TP_STRUCT__entry(
 		__array(char, buf, MAX_BUF_LEN)
@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
 
 	TP_fast_assign(
 		snprintf(__entry->buf, MAX_BUF_LEN,
-			"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
-				id, type, ring_id, pt_level, gma, gpa);
+			"VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
+			id, type, ring_id, root_entry_type, gma, gpa);
 	),
 
 	TP_printk("%s", __entry->buf)
-- 
2.7.4


