[Intel-gfx] [PATCH v1 04/12] drm/i915: vgpu ppgtt page table pv support

Xiaolin Zhang xiaolin.zhang at intel.com
Fri Sep 4 16:21:37 UTC 2020


To improve efficiency and reduce the complexity of vgpu ppgtt support,
the vgpu ppgtt page table operations are implemented in a PV fashion,
and PV versions of the bind/unbind ppgtt vma ops are provided.

The PV ppgtt vma ops use the CTB protocol to send a PV ppgtt command,
together with a struct pv_vma payload, from the guest to GVT; GVT in
turn implements handlers for PV_CMD_BIND_PPGTT and PV_CMD_UNBIND_PPGTT
to support the vgpu PPGTT feature, as sketched below.
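
On the wire, the message is a single command dword followed by the
packed struct pv_vma. A minimal sketch of the marshalling done by
vgpu_pv_vma_action() below, e.g. for a bind (pv->send() is the PV
command transport added earlier in this series):

	u32 data[1 + sizeof(struct pv_vma) / 4];
	struct pv_vma pvvma = {
		.size  = vma->node.size >> PAGE_SHIFT, /* pages to map */
		.start = vma->node.start,              /* GPU virtual address */
		.flags = flags,                        /* bind/unbind flags */
		.pml4  = px_dma(ppgtt->pd),            /* target ppgtt */
		/*
		 * .dma_addrs is the PTE-encoded dma address itself for a
		 * single page, or the guest physical address of a u64
		 * array of PTE-encoded addresses for larger objects.
		 */
	};

	data[0] = PV_CMD_BIND_PPGTT;
	memcpy(&data[1], &pvvma, sizeof(pvvma));
	i915->vgpu.pv->send(i915, data, ARRAY_SIZE(data));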

A new PV_PPGTT pv_cap controls this level of PV support on both the
guest and the host side.
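
Conceptually the capability is active only when both sides advertise
it (a sketch; intel_vgpu_check_pv_cap() comes from the PV detection
patch earlier in this series, and "host_pv_caps" stands in for the
capability bits the host exposes through the shared page):

	/* PV_PPGTT is in effect iff guest and host both advertise it */
	bool active = (i915->vgpu.pv_caps & PV_PPGTT) &&
		      (host_pv_caps & PV_PPGTT);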

Signed-off-by: Xiaolin Zhang <xiaolin.zhang at intel.com>
---
 drivers/gpu/drm/i915/gt/gen8_ppgtt.c |  4 +-
 drivers/gpu/drm/i915/i915_vgpu.c     | 95 ++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_vgpu.h     | 17 +++++++
 3 files changed, 115 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index eb64f47..de0eb6d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -729,8 +729,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
 
 	ppgtt->vm.pte_encode = gen8_pte_encode;
 
-	if (intel_vgpu_active(gt->i915))
+	if (intel_vgpu_active(gt->i915)) {
+		intel_vgpu_config_pv_caps(gt->i915, PV_PPGTT, ppgtt);
 		gen8_ppgtt_notify_vgt(ppgtt, true);
+	}
 
 	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index e856eff..9875e2f 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -99,6 +99,9 @@ void intel_vgpu_detect(struct drm_i915_private *dev_priv)
 	dev_priv->vgpu.active = true;
 	mutex_init(&dev_priv->vgpu.lock);
 
+	/* guest driver PV capability */
+	dev_priv->vgpu.pv_caps = PV_PPGTT;
+
 	if (!intel_vgpu_detect_pv_caps(dev_priv, shared_area)) {
 		DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
 		goto out;
@@ -370,6 +373,91 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
  * i915 vgpu PV support for Linux
  */
 
+static int vgpu_pv_vma_action(struct i915_address_space *vm,
+		struct i915_vma *vma,
+		u32 action, u64 flags, u64 pte_flag)
+{
+	struct drm_i915_private *i915 = vma->vm->i915;
+	struct sgt_iter sgt_iter;
+	dma_addr_t addr;
+	struct pv_vma pvvma;
+	u32 num_pages;
+	u64 *gpas;
+	int i = 0;
+	u32 data[32];
+	int ret;
+	u32 size = sizeof(pvvma) / 4;
+
+	if (1 + size > ARRAY_SIZE(data))
+		return -EIO;
+
+	memset(&pvvma, 0, sizeof(pvvma));
+	num_pages = vma->node.size >> PAGE_SHIFT;
+	pvvma.size = num_pages;
+	pvvma.start = vma->node.start;
+	pvvma.flags = flags;
+
+	if (action == PV_CMD_BIND_PPGTT || action == PV_CMD_UNBIND_PPGTT)
+		pvvma.pml4 = px_dma(i915_vm_to_ppgtt(vm)->pd);
+
+	if (num_pages == 1) {
+		pvvma.dma_addrs = vma->pages->sgl->dma_address | pte_flag;
+		goto out;
+	}
+
+	gpas = kmalloc_array(num_pages, sizeof(u64), GFP_KERNEL);
+	if (!gpas)
+		return -ENOMEM;
+
+	pvvma.dma_addrs = virt_to_phys((void *)gpas);
+	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+		gpas[i++] = addr | pte_flag;
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (i < num_pages)
+		gpas[i++] = vm->scratch[0]->encode;
+out:
+	data[0] = action;
+	memcpy(&data[1], &pvvma, sizeof(pvvma));
+	ret = i915->vgpu.pv->send(i915, data, 1 + size);
+
+	if (num_pages > 1)
+		kfree(gpas);
+
+	return ret;
+}
+
+static void ppgtt_bind_vma_pv(struct i915_address_space *vm,
+		    struct i915_vm_pt_stash *stash,
+		    struct i915_vma *vma,
+		    enum i915_cache_level cache_level,
+		    u32 flags)
+{
+	u32 pte_flags;
+	u64 pte_encode;
+
+	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+		flags |= BIT(I915_VMA_ALLOC_BIT);
+	}
+
+	/* Applicable to VLV, and gen8+ */
+	pte_flags = 0;
+	if (i915_gem_object_is_readonly(vma->obj))
+		pte_flags |= PTE_READ_ONLY;
+
+	pte_encode = vma->vm->pte_encode(0, cache_level, pte_flags);
+
+	vgpu_pv_vma_action(vm, vma, PV_CMD_BIND_PPGTT, flags, pte_encode);
+}
+
+static void ppgtt_unbind_vma_pv(struct i915_address_space *vm,
+		struct i915_vma *vma)
+{
+	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+		vgpu_pv_vma_action(vm, vma, PV_CMD_UNBIND_PPGTT, 0, 0);
+}
+
 /**
  * wait_for_desc_update - Wait for the command buffer descriptor update.
  * @desc:	buffer descriptor
@@ -644,9 +732,16 @@ static int intel_vgpu_setup_shared_page(struct drm_i915_private *i915,
 void intel_vgpu_config_pv_caps(struct drm_i915_private *i915,
 		enum pv_caps cap, void *data)
 {
+	struct i915_ppgtt *ppgtt;
 
 	if (!intel_vgpu_check_pv_cap(i915, cap))
 		return;
+
+	if (cap == PV_PPGTT) {
+		ppgtt = (struct i915_ppgtt *)data;
+		ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma_pv;
+		ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma_pv;
+	}
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index f2826f9..7e4ea99 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -37,6 +37,14 @@ struct i915_ggtt;
 /* define different PV capabilities */
 enum pv_caps {
 	PV_NONE = 0,
+	PV_PPGTT = BIT(0),
+};
+
+/* vgpu PV commands */
+enum intel_vgpu_pv_cmd {
+	PV_CMD_DEFAULT = 0x0,
+	PV_CMD_BIND_PPGTT,
+	PV_CMD_UNBIND_PPGTT,
 };
 
 /* A common shared page(4KB) between GVTg and vgpu allocated by guest */
@@ -45,6 +53,15 @@ struct gvt_shared_page {
 	u16 ver_minor;
 };
 
+/* PV virtual memory address for GGTT/PPGTT */
+struct pv_vma {
+	u32 size; /* num of pages */
+	u32 flags; /* bind or unbind flags */
+	u64 start; /* start of virtual address */
+	u64 dma_addrs; /* BO's dma address list */
+	u64 pml4; /* ppgtt handler */
+} __packed;
+
 /*
  * Definition of the command transport message header (DW0)
  *
-- 
2.7.4


