[Intel-gfx] [PATCH v14 4/7] drm/i915/gvt: Add opregion support
Tina Zhang
tina.zhang at intel.com
Fri Aug 18 10:21:33 UTC 2017
The Windows guest driver needs the VBT in the OpRegion to configure its
display settings. Without OpRegion support, the display registers won't
be set, which prevents the display model from getting correct
information about the guest display plane.

This patch provides a virtual OpRegion for the guest. The current
implementation fills the virtual OpRegion with the contents of the
host's OpRegion. The original author of this patch is Xiaoguang Chen.
Signed-off-by: Bing Niu <bing.niu at intel.com>
Signed-off-by: Tina Zhang <tina.zhang at intel.com>
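
For context only (not part of the patch): below is a minimal userspace
sketch of how a VFIO client such as QEMU could discover and read the
vendor-specific OpRegion region that kvmgt_set_opregion() registers and
intel_vgpu_rw() serves. The helper name read_igd_opregion(), the
already-open device_fd, the fixed 0x8086 vendor id, and the buffer
handling are illustrative assumptions; error handling is trimmed.

/*
 * Illustrative sketch only: discover the Intel IGD OpRegion region via
 * the VFIO region-info capability chain and read it with pread().
 * Assumes device_fd is an open VFIO device fd for the vGPU.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

#define INTEL_VENDOR_ID 0x8086  /* PCI_VENDOR_ID_INTEL */

static ssize_t read_igd_opregion(int device_fd, unsigned int index,
                                 void *dst, size_t dst_size)
{
        char buf[4096] = {0};   /* room for region info plus cap chain */
        struct vfio_region_info *info = (struct vfio_region_info *)buf;
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *type;

        info->argsz = sizeof(buf);
        info->index = index;    /* device-specific index >= VFIO_PCI_NUM_REGIONS */
        if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
                return -1;
        if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS))
                return -1;

        /* Walk the capability chain until the region-type capability. */
        hdr = (struct vfio_info_cap_header *)(buf + info->cap_offset);
        while (hdr->id != VFIO_REGION_INFO_CAP_TYPE) {
                if (!hdr->next)
                        return -1;
                hdr = (struct vfio_info_cap_header *)(buf + hdr->next);
        }

        type = (struct vfio_region_info_cap_type *)hdr;
        if (type->type != (VFIO_REGION_TYPE_PCI_VENDOR_TYPE | INTEL_VENDOR_ID) ||
            type->subtype != VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION)
                return -1;

        /* The region is registered read-only; this pread() is served by
         * intel_vgpu_reg_rw_opregion() via intel_vgpu_rw(). */
        if (dst_size > info->size)
                dst_size = info->size;
        return pread(device_fd, dst, dst_size, info->offset);
}

A client would typically loop the index from VFIO_PCI_NUM_REGIONS up to
the num_regions value reported by VFIO_DEVICE_GET_INFO (which this patch
extends to include the device-specific regions) and probe each one.
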
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index df7f33a..32c345c 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -55,6 +55,7 @@ struct intel_gvt_mpt {
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
+ int (*set_opregion)(void *vgpu);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index fd0c85f..6b0a330 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+ size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct intel_vgpu *vgpu,
+ struct vfio_region *region);
+};
+
struct vfio_region {
u32 type;
u32 subtype;
size_t size;
u32 flags;
+ const struct intel_vgpu_regops *ops;
+ void *data;
};
struct kvmgt_pgfn {
@@ -430,6 +442,91 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
+static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ void *base = vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vgpu->vdev.region[i].size || iswrite) {
+ gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
+ return -EINVAL;
+ }
+ count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ memcpy(buf, base + pos, count);
+
+ return count;
+}
+
+static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+ memunmap(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
+ .rw = intel_vgpu_reg_rw_opregion,
+ .release = intel_vgpu_reg_release_opregion,
+};
+
+static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
+ unsigned int type, unsigned int subtype,
+ const struct intel_vgpu_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_region *region;
+
+ region = krealloc(vgpu->vdev.region,
+ (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vgpu->vdev.region = region;
+ vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
+ vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
+ vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
+ vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
+ vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
+ vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
+ vgpu->vdev.num_regions++;
+
+ return 0;
+}
+
+static int kvmgt_set_opregion(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ unsigned int addr;
+ void *base;
+ int ret;
+
+ addr = vgpu->gvt->opregion.opregion_pa;
+ if (!addr || !(~addr))
+ return -ENODEV;
+
+ base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memunmap(base);
+ return -EINVAL;
+ }
+
+ ret = intel_vgpu_register_reg(vgpu,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+ &intel_vgpu_regops_opregion, OPREGION_SIZE,
+ VFIO_REGION_INFO_FLAG_READ, base);
+ if (ret)
+ memunmap(base);
+
+ return ret;
+}
+
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
@@ -646,7 +743,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -680,8 +777,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_BAR5_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX:
+ break;
default:
- gvt_vgpu_err("unsupported region: %u\n", index);
+ index -= VFIO_PCI_NUM_REGIONS;
+ return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ ppos, is_write);
}
return ret == 0 ? count : ret;
@@ -944,7 +1044,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1065,6 +1166,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
@@ -1513,6 +1615,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
.read_gpa = kvmgt_read_gpa,
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
+ .set_opregion = kvmgt_set_opregion,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index f0e5487..9e73f2e 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -292,4 +292,19 @@ static inline int intel_gvt_hypervisor_set_trap_area(
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
+/**
+ * intel_gvt_hypervisor_set_opregion - Set opregion for guest
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->set_opregion)
+ return 0;
+
+ return intel_gvt_host.mpt->set_opregion(vgpu);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 3117991..04c0452 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -113,23 +113,37 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
*/
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
- int ret;
+ int ret = 0;
+ unsigned long pfn;
gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_KVM:
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
+ vgpu_opregion(vgpu)->va = memremap(pfn << PAGE_SHIFT,
+ INTEL_GVT_OPREGION_SIZE,
+ MEMREMAP_WB);
+ if (!vgpu_opregion(vgpu)->va) {
+ gvt_vgpu_err("failed to map guest opregion\n");
+ ret = -EFAULT;
+ }
+ break;
+ case INTEL_GVT_HYPERVISOR_XEN:
gvt_dbg_core("emulate opregion from kernel\n");
ret = init_vgpu_opregion(vgpu, gpa);
if (ret)
- return ret;
+ break;
ret = map_vgpu_opregion(vgpu, true);
- if (ret)
- return ret;
+ break;
+ default:
+ ret = -EINVAL;
+ gvt_vgpu_err("not supported hypervisor\n");
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3deadcb..bcbf535 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -383,6 +383,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_shadow_ctx;
+ ret = intel_gvt_hypervisor_set_opregion(vgpu);
+ if (ret)
+ goto out_clean_shadow_ctx;
+
mutex_unlock(&gvt->lock);
return vgpu;
--
2.7.4