[Intel-gfx] [PATCH v2 08/13] vfio/gvt: Convert to use vfio_register_group_dev()
Jason Gunthorpe
jgg at nvidia.com
Mon Apr 26 20:00:10 UTC 2021
While there is a confusing mess of pointers and structs in this driver,
the struct kvmgt_vdev (which is 1:1 with a struct intel_vgpu) is what
holds the vfio_device. Replace all the drvdata lookups and ad-hoc
derivations of vgpu and vdev with container_of() or vdev->vgpu.
Signed-off-by: Jason Gunthorpe <jgg at nvidia.com>
---
drivers/gpu/drm/i915/gvt/kvmgt.c | 208 +++++++++++++++++--------------
1 file changed, 111 insertions(+), 97 deletions(-)
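
As a quick reference, a minimal sketch of the pattern this conversion
relies on: struct vfio_device is embedded in struct kvmgt_vdev, so each
vfio_device_ops callback can recover the driver state with
container_of() instead of a drvdata lookup. The helper name below is
illustrative only (not part of the patch); the patch open-codes
container_of() in each callback.

static inline struct kvmgt_vdev *
vfio_dev_to_vdev(struct vfio_device *vfio_dev)
{
	/* illustrative helper: vfio_device is embedded in kvmgt_vdev */
	return container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
}

Each callback then begins with vfio_dev_to_vdev(vfio_dev) and, where the
vGPU itself is needed, follows vdev->vgpu.
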
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 6bf176e8426e63..85ef300087e091 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -50,6 +50,7 @@
#include "gvt.h"
static const struct intel_gvt_ops *intel_gvt_ops;
+static const struct vfio_device_ops intel_vgpu_dev_ops;
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
@@ -109,8 +110,8 @@ struct gvt_dma {
};
struct kvmgt_vdev {
+ struct vfio_device vfio_device;
struct intel_vgpu *vgpu;
- struct mdev_device *mdev;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
@@ -130,7 +131,6 @@ struct kvmgt_vdev {
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
- struct vfio_device *vfio_device;
struct vfio_group *vfio_group;
};
@@ -144,7 +144,7 @@ static inline bool handle_valid(unsigned long handle)
return !!(handle & ~0xff);
}
-static int kvmgt_guest_init(struct mdev_device *mdev);
+static int kvmgt_guest_init(struct kvmgt_vdev *vdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -611,12 +611,7 @@ static int kvmgt_get_vfio_device(void *p_vgpu)
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- vdev->vfio_device = vfio_device_get_from_dev(
- mdev_dev(vdev->mdev));
- if (!vdev->vfio_device) {
- gvt_vgpu_err("failed to get vfio device\n");
- return -ENODEV;
- }
+ vfio_device_get(&vdev->vfio_device);
return 0;
}
@@ -683,16 +678,14 @@ static void kvmgt_put_vfio_device(void *vgpu)
{
struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
- if (WARN_ON(!vdev->vfio_device))
- return;
-
- vfio_device_put(vdev->vfio_device);
+ vfio_device_put(&vdev->vfio_device);
}
-static int intel_vgpu_create(struct mdev_device *mdev)
+static int intel_vgpu_probe(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
+ struct kvmgt_vdev *vdev;
struct device *pdev;
void *gvt;
int ret;
@@ -702,40 +695,40 @@ static int intel_vgpu_create(struct mdev_device *mdev)
type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
mdev_get_type_group_id(mdev));
- if (!type) {
- ret = -EINVAL;
- goto out;
- }
+ if (!type)
+ return -EINVAL;
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret);
- goto out;
+ return ret;
}
- INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
+ vdev = kvmgt_vdev(vgpu);
+ INIT_WORK(&vdev->release_work, intel_vgpu_release_work);
+ vfio_init_group_dev(&vdev->vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
- kvmgt_vdev(vgpu)->mdev = mdev;
- mdev_set_drvdata(mdev, vgpu);
+ ret = vfio_register_group_dev(&vdev->vfio_device);
+ if (ret) {
+ intel_gvt_ops->vgpu_destroy(vgpu);
+ return ret;
+ }
+ dev_set_drvdata(&mdev->dev, vdev);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
- ret = 0;
-
-out:
- return ret;
+ return 0;
}
-static int intel_vgpu_remove(struct mdev_device *mdev)
+static void intel_vgpu_remove(struct mdev_device *mdev)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-
- if (handle_valid(vgpu->handle))
- return -EBUSY;
+ struct kvmgt_vdev *vdev = dev_get_drvdata(&mdev->dev);
+ struct intel_vgpu *vgpu = vdev->vgpu;
+ if (WARN_ON(handle_valid(vgpu->handle)))
+ return;
intel_gvt_ops->vgpu_destroy(vgpu);
- return 0;
}
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
@@ -788,10 +781,11 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int intel_vgpu_open(struct mdev_device *mdev)
+static int intel_vgpu_open(struct vfio_device *vfio_dev)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct kvmgt_vdev *vdev =
+ container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
+ struct intel_vgpu *vgpu = vdev->vgpu;
unsigned long events;
int ret;
struct vfio_group *vfio_group;
@@ -800,7 +794,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
+ ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
&vdev->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
@@ -809,7 +803,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
}
events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
+ ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
&vdev->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
@@ -817,7 +811,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
- vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ vfio_group = vfio_group_get_external_user_from_dev(vfio_dev->dev);
if (IS_ERR_OR_NULL(vfio_group)) {
ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
@@ -833,11 +827,11 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_group;
}
- ret = kvmgt_guest_init(mdev);
+ ret = kvmgt_guest_init(vdev);
if (ret)
goto undo_group;
- intel_gvt_ops->vgpu_activate(vgpu);
+ intel_gvt_ops->vgpu_activate(vdev->vgpu);
atomic_set(&vdev->released, 0);
return ret;
@@ -847,11 +841,11 @@ static int intel_vgpu_open(struct mdev_device *mdev)
vdev->vfio_group = NULL;
undo_register:
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
undo_iommu:
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
&vdev->iommu_notifier);
out:
return ret;
@@ -884,12 +878,12 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
intel_gvt_ops->vgpu_release(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+ ret = vfio_unregister_notifier(vdev->vfio_device.dev, VFIO_IOMMU_NOTIFY,
&vdev->iommu_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+ ret = vfio_unregister_notifier(vdev->vfio_device.dev, VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for group failed: %d\n", ret);
@@ -907,11 +901,12 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
vgpu->handle = 0;
}
-static void intel_vgpu_release(struct mdev_device *mdev)
+static void intel_vgpu_release(struct vfio_device *vfio_dev)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev =
+ container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vdev->vgpu);
}
static void intel_vgpu_release_work(struct work_struct *work)
@@ -997,11 +992,10 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return 0;
}
-static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+static ssize_t intel_vgpu_rw(struct kvmgt_vdev *vdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct intel_vgpu *vgpu = vdev->vgpu;
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
@@ -1047,9 +1041,9 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
return ret == 0 ? count : ret;
}
-static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+static bool gtt_entry(struct kvmgt_vdev *vdev, loff_t *ppos)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct intel_vgpu *vgpu = vdev->vgpu;
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct intel_gvt *gvt = vgpu->gvt;
int offset;
@@ -1066,9 +1060,11 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
true : false;
}
-static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct kvmgt_vdev *vdev =
+ container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
unsigned int done = 0;
int ret;
@@ -1077,10 +1073,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
/* Only support GGTT entry 8 bytes read */
if (count >= 8 && !(*ppos % 8) &&
- gtt_entry(mdev, ppos)) {
+ gtt_entry(vdev, ppos)) {
u64 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vdev, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1092,7 +1088,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 4 && !(*ppos % 4)) {
u32 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vdev, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1104,7 +1100,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vdev, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1116,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
- ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ ret = intel_vgpu_rw(vdev, &val, sizeof(val), ppos,
false);
if (ret <= 0)
goto read_err;
@@ -1139,10 +1135,12 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
return -EFAULT;
}
-static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev,
const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct kvmgt_vdev *vdev =
+ container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
unsigned int done = 0;
int ret;
@@ -1151,13 +1149,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
/* Only support GGTT entry 8 bytes write */
if (count >= 8 && !(*ppos % 8) &&
- gtt_entry(mdev, ppos)) {
+ gtt_entry(vdev, ppos)) {
u64 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vdev, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1169,7 +1167,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vdev, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1181,7 +1179,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val,
+ ret = intel_vgpu_rw(vdev, (char *)&val,
sizeof(val), ppos, true);
if (ret <= 0)
goto write_err;
@@ -1193,7 +1191,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ret = intel_vgpu_rw(vdev, &val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1212,13 +1210,16 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
return -EFAULT;
}
-static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+static int intel_vgpu_mmap(struct vfio_device *vfio_dev,
+ struct vm_area_struct *vma)
{
+ struct kvmgt_vdev *vdev =
+ container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
unsigned int index;
u64 virtaddr;
unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct intel_vgpu *vgpu = vdev->vgpu;
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (index >= VFIO_PCI_ROM_REGION_INDEX)
@@ -1341,11 +1342,12 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
return func(vgpu, index, start, count, flags, data);
}
-static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
unsigned long arg)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct kvmgt_vdev *vdev =
+ container_of(vfio_dev, struct kvmgt_vdev, vfio_device);
+ struct intel_vgpu *vgpu = vdev->vgpu;
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1624,14 +1626,10 @@ static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
+ struct kvmgt_vdev *vdev = dev_get_drvdata(dev);
+ struct intel_vgpu *vgpu = vdev->vgpu;
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%d\n", vgpu->id);
- }
- return sprintf(buf, "\n");
+ return sprintf(buf, "%d\n", vgpu->id);
}
static DEVICE_ATTR_RO(vgpu_id);
@@ -1651,18 +1649,28 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
-static struct mdev_parent_ops intel_vgpu_ops = {
- .mdev_attr_groups = intel_vgpu_groups,
- .create = intel_vgpu_create,
- .remove = intel_vgpu_remove,
+static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .open = intel_vgpu_open,
+ .release = intel_vgpu_release,
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
+};
- .open = intel_vgpu_open,
- .release = intel_vgpu_release,
+static struct mdev_driver intel_vgpu_mdev_driver = {
+ .driver = {
+ .name = "intel_vgpu_mdev",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = intel_vgpu_groups,
+ },
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+};
- .read = intel_vgpu_read,
- .write = intel_vgpu_write,
- .mmap = intel_vgpu_mmap,
- .ioctl = intel_vgpu_ioctl,
+static struct mdev_parent_ops intel_vgpu_ops = {
+ .device_driver = &intel_vgpu_mdev_driver,
};
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
@@ -1806,18 +1814,12 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
return ret;
}
-static int kvmgt_guest_init(struct mdev_device *mdev)
+static int kvmgt_guest_init(struct kvmgt_vdev *vdev)
{
struct kvmgt_guest_info *info;
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
+ struct intel_vgpu *vgpu = vdev->vgpu;
struct kvm *kvm;
- vgpu = mdev_get_drvdata(mdev);
- if (handle_valid(vgpu->handle))
- return -EEXIST;
-
- vdev = kvmgt_vdev(vgpu);
kvm = vdev->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -2125,13 +2127,25 @@ static const struct intel_gvt_mpt kvmgt_mpt = {
static int __init kvmgt_init(void)
{
- if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
- return -ENODEV;
+ int ret;
+
+ ret = mdev_register_driver(&intel_vgpu_mdev_driver);
+ if (ret)
+ return ret;
+
+ if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) {
+ ret = -ENODEV;
+ goto err_driver;
+ }
return 0;
+err_driver:
+ mdev_unregister_driver(&intel_vgpu_mdev_driver);
+ return ret;
}
static void __exit kvmgt_exit(void)
{
+ mdev_unregister_driver(&intel_vgpu_mdev_driver);
intel_gvt_unregister_hypervisor();
}
--
2.31.1