[PATCH v7 6/6] drm/i915/gvt: Add user interface for dma-buf
Xiaoguang Chen
xiaoguang.chen at intel.com
Wed Jun 7 07:44:38 UTC 2017
User space should first create the management fd for dma-buf operations.
Using this management fd, it can then query plane information and create
a dma-buf for a plane when necessary.
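
For illustration, here is a minimal user-space sketch of that flow. The
ioctl numbers and struct names come from the uapi headers added in the
earlier patches of this series; the exact field layouts and the plane_id
values shown here are assumptions:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>   /* uapi additions from earlier in this series */

    static int export_primary_plane(int vfio_device_fd)
    {
        uint32_t type = VFIO_DEVICE_DMABUF_MGR_FD;
        struct vfio_vgpu_query_plane plane = { .argsz = sizeof(plane) };
        struct vfio_vgpu_create_dmabuf dmabuf = { .argsz = sizeof(dmabuf) };
        int mgr_fd;

        /* 1) create the management fd from the vfio device fd */
        mgr_fd = ioctl(vfio_device_fd, VFIO_DEVICE_GET_FD, &type);
        if (mgr_fd < 0)
            return -1;

        /* 2) query the plane information (plane_id member is assumed) */
        plane.plane_id = 0;
        if (ioctl(mgr_fd, VFIO_DEVICE_QUERY_PLANE, &plane) < 0)
            return -1;

        /* 3) export the plane as a dma-buf; the new fd comes back in .fd */
        dmabuf.plane_id = 0;
        if (ioctl(mgr_fd, VFIO_DEVICE_CREATE_DMABUF, &dmabuf) < 0)
            return -1;

        return dmabuf.fd;
    }

Closing the management fd drops the remaining GEM references and the vfio
device reference (see intel_vgpu_dmabuf_mgr_fd_release() below).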
Signed-off-by: Xiaoguang Chen <xiaoguang.chen at intel.com>
---
drivers/gpu/drm/i915/gvt/dmabuf.c | 37 ++++++++-
drivers/gpu/drm/i915/gvt/dmabuf.h | 5 ++
drivers/gpu/drm/i915/gvt/gvt.c | 3 +
drivers/gpu/drm/i915/gvt/gvt.h | 5 ++
drivers/gpu/drm/i915/gvt/hypercall.h | 3 +
drivers/gpu/drm/i915/gvt/kvmgt.c | 149 +++++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/mpt.h | 30 +++++++
drivers/gpu/drm/i915/gvt/vgpu.c | 2 +
8 files changed, 233 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 411165da..ce1041f 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -81,6 +81,28 @@ static void intel_vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
static void intel_vgpu_gem_release(struct drm_i915_gem_object *obj)
{
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct intel_vgpu_fb_info *fb_info;
+ struct intel_vgpu *vgpu = NULL;
+ struct list_head *pos;
+
+ fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
+ if (WARN_ON(!fb_info || !fb_info->vgpu)) {
+ gvt_vgpu_err("gvt info is invalid\n");
+ goto out;
+ }
+
+ vgpu = fb_info->vgpu;
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if (dmabuf_obj->obj == obj) {
+ /* unlink before freeing to keep the list consistent */
+ list_del(pos);
+ kfree(dmabuf_obj);
+ break;
+ }
+ }
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+out:
kfree(obj->gvt_info);
}
@@ -225,6 +247,7 @@ int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args)
struct vfio_vgpu_create_dmabuf *gvt_dmabuf = args;
struct intel_vgpu_fb_info *fb_info;
int ret;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
ret = intel_vgpu_get_plane_info(dev, vgpu, &gvt_dmabuf->plane_info,
gvt_dmabuf->plane_id);
@@ -247,6 +270,16 @@ int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args)
fb_info->vgpu = vgpu;
obj->gvt_info = fb_info;
+ dmabuf_obj = kmalloc(sizeof(*dmabuf_obj), GFP_KERNEL);
+ if (!dmabuf_obj) {
+ gvt_vgpu_err("alloc dmabuf_obj failed\n");
+ ret = -ENOMEM;
+ goto out_free_info;
+ }
+ dmabuf_obj->obj = obj;
+ INIT_LIST_HEAD(&dmabuf_obj->list);
+ list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
+
dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
if (IS_ERR(dmabuf)) {
@@ -260,11 +293,13 @@ int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args)
gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
goto out_free;
}
-
+ intel_gvt_hypervisor_get_vfio_device(vgpu);
gvt_dmabuf->fd = ret;
return 0;
out_free:
+ kfree(dmabuf_obj);
+out_free_info:
kfree(fb_info);
out:
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
index 8be9979..cafa781 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.h
@@ -31,6 +31,11 @@ struct intel_vgpu_fb_info {
uint32_t fb_size;
};
+struct intel_vgpu_dmabuf_obj {
+ struct drm_i915_gem_object *obj;
+ struct list_head list;
+};
+
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 2032917..d589830 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -54,6 +54,9 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
+ .vgpu_query_plane = intel_vgpu_query_plane,
+ .vgpu_create_dmabuf = intel_vgpu_create_dmabuf,
};
/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 763a8c5..8f08c42 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -185,8 +185,11 @@ struct intel_vgpu {
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
+ struct vfio_device *vfio_device;
} vdev;
#endif
+ atomic_t mgr_fd_opened;
+ struct list_head dmabuf_obj_list_head;
};
struct intel_gvt_gm {
@@ -467,6 +470,8 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
+ int (*vgpu_query_plane)(struct intel_vgpu *, void *);
+ int (*vgpu_create_dmabuf)(struct intel_vgpu *, void *);
};
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 32c345c..8f29c23 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -56,6 +56,9 @@ struct intel_gvt_mpt {
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
+ int (*get_vfio_device)(unsigned long handle);
+ void (*put_vfio_device)(unsigned long handle);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 6b4652a..2d4ad46 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -41,6 +41,7 @@
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <linux/anon_inodes.h>
#include "i915_drv.h"
#include "gvt.h"
@@ -523,6 +524,121 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
+static int kvmgt_get_vfio_device(unsigned long handle)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct vfio_device *device;
+
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ device = vfio_device_get_from_dev(mdev_dev(vgpu->vdev.mdev));
+ if (device == NULL)
+ return -ENODEV;
+ vgpu->vdev.vfio_device = device;
+
+ return 0;
+}
+
+static void kvmgt_put_vfio_device(unsigned long handle)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+
+ if (!handle_valid(handle))
+ return;
+
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ vfio_device_put(vgpu->vdev.vfio_device);
+}
+
+static int intel_vgpu_dmabuf_mgr_fd_mmap(struct file *file,
+ struct vm_area_struct *vma)
+{
+ return -EPERM;
+}
+
+static int intel_vgpu_dmabuf_mgr_fd_release(struct inode *inode,
+ struct file *filp)
+{
+ struct intel_vgpu *vgpu = filp->private_data;
+ struct intel_vgpu_dmabuf_obj *obj;
+ struct list_head *pos, *n;
+
+ if (WARN_ON(!vgpu->vdev.vfio_device))
+ return -ENODEV;
+
+ /*
+ * Dropping the last GEM reference frees the list node in
+ * intel_vgpu_gem_release(), so a safe iterator is required.
+ */
+ list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
+ obj = container_of(pos, struct intel_vgpu_dmabuf_obj, list);
+ i915_gem_object_put(obj->obj);
+ }
+ kvmgt_put_vfio_device(vgpu->handle);
+ atomic_set(&vgpu->mgr_fd_opened, 0);
+
+ return 0;
+}
+
+static long intel_vgpu_dmabuf_mgr_fd_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct intel_vgpu *vgpu = filp->private_data;
+ int minsz;
+ int ret = 0;
+
+ if (ioctl == VFIO_DEVICE_QUERY_PLANE) {
+ struct vfio_vgpu_query_plane plane_info;
+
+ minsz = offsetofend(struct vfio_vgpu_query_plane, padding);
+ if (copy_from_user(&plane_info, (void __user *)arg, minsz))
+ return -EFAULT;
+ if (plane_info.argsz < minsz || plane_info.flags != 0)
+ return -EINVAL;
+ ret = intel_gvt_ops->vgpu_query_plane(vgpu, &plane_info);
+ if (ret != 0) {
+ gvt_vgpu_err("query plane failed:%d\n", ret);
+ return -EINVAL;
+ }
+ return copy_to_user((void __user *)arg, &plane_info, minsz) ?
+ -EFAULT : 0;
+ } else if (ioctl == VFIO_DEVICE_CREATE_DMABUF) {
+ struct vfio_vgpu_create_dmabuf dmabuf;
+
+ minsz = offsetofend(struct vfio_vgpu_create_dmabuf, plane_id);
+ if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+ return -EFAULT;
+ if (dmabuf.argsz < minsz || dmabuf.flags != 0)
+ return -EINVAL;
+ ret = kvmgt_get_vfio_device(vgpu->handle);
+ if (ret != 0)
+ return ret;
+
+ ret = intel_gvt_ops->vgpu_create_dmabuf(vgpu, &dmabuf);
+ if (ret != 0) {
+ kvmgt_put_vfio_device(vgpu->handle);
+ return ret;
+ }
+ return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+ -EFAULT : 0;
+ } else {
+ gvt_vgpu_err("unsupported mgr fd operation\n");
+ }
+
+ return -EINVAL;
+}
+
+static const struct file_operations intel_vgpu_dmabuf_mgr_fd_ops = {
+ .release = intel_vgpu_dmabuf_mgr_fd_release,
+ .unlocked_ioctl = intel_vgpu_dmabuf_mgr_fd_ioctl,
+ .mmap = intel_vgpu_dmabuf_mgr_fd_mmap,
+ .llseek = noop_llseek,
+};
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
@@ -1249,6 +1365,36 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu);
return 0;
+ } else if (cmd == VFIO_DEVICE_GET_FD) {
+ int fd;
+ u32 type;
+ int ret;
+
+ /* atomic_cmpxchg() closes the race between concurrent opens */
+ if (atomic_cmpxchg(&vgpu->mgr_fd_opened, 0, 1) != 0) {
+ gvt_vgpu_err("mgr fd already opened\n");
+ return -EPERM;
+ }
+
+ if (copy_from_user(&type, (void __user *)arg, sizeof(type))) {
+ atomic_set(&vgpu->mgr_fd_opened, 0);
+ return -EFAULT;
+ }
+ if (type != VFIO_DEVICE_DMABUF_MGR_FD) {
+ atomic_set(&vgpu->mgr_fd_opened, 0);
+ return -EINVAL;
+ }
+
+ ret = kvmgt_get_vfio_device(vgpu->handle);
+ if (ret != 0) {
+ atomic_set(&vgpu->mgr_fd_opened, 0);
+ return ret;
+ }
+
+ fd = anon_inode_getfd("intel-vgpu-dmabuf-mgr-fd",
+ &intel_vgpu_dmabuf_mgr_fd_ops,
+ vgpu, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ kvmgt_put_vfio_device(vgpu->handle);
+ atomic_set(&vgpu->mgr_fd_opened, 0);
+ gvt_vgpu_err("create dmabuf mgr fd failed: %d\n", fd);
+ return fd;
+ }
+
+ return fd;
}
return 0;
@@ -1612,6 +1758,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.set_opregion = kvmgt_set_opregion,
+ .get_vfio_device = kvmgt_get_vfio_device,
+ .put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index ab71300..1be961e 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -308,4 +308,34 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
return intel_gvt_host.mpt->set_opregion(vgpu);
}
+/**
+ * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->get_vfio_device)
+ return 0;
+
+ return intel_gvt_host.mpt->get_vfio_device(vgpu->handle);
+}
+
+/**
+ * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
+ * @vgpu: a vGPU
+ */
+static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->put_vfio_device)
+ return;
+
+ intel_gvt_host.mpt->put_vfio_device(vgpu->handle);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 8e1d504..8747613 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -346,6 +346,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+ INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+ atomic_set(&vgpu->mgr_fd_opened, 0);
intel_vgpu_init_cfg_space(vgpu, param->primary);
--
2.7.4