[PATCH 1/2] drm/i915/gvt: Store ->kvm reference in intel_vgpu struct.

Andrey Ryabinin <arbn@yandex-team.com>
Tue Feb 11 11:45:43 UTC 2025


'vfio_device' keeps the ->kvm pointer with an elevated reference count
from the first open() of the device up until the last close(). So the
kvm struct and its dependencies (kvm kthreads, cgroups, ...) are kept
alive even for VFIO devices that don't need ->kvm.

Copy the ->kvm pointer from the vfio_device struct and store it in the
'intel_vgpu'. Note that kvm_page_track_[un]register_notifier() already
does get/put calls, keeping the kvm struct alive.
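
The refcounting this relies on looks roughly like the following
(simplified sketch of the KVM page-track helpers, not a verbatim copy
of the KVM code):

	int kvm_page_track_register_notifier(struct kvm *kvm,
					     struct kvm_page_track_notifier_node *n)
	{
		...
		/* Pin the kvm struct while the notifier is registered. */
		kvm_get_kvm(kvm);
		...
	}

	void kvm_page_track_unregister_notifier(struct kvm *kvm,
						struct kvm_page_track_notifier_node *n)
	{
		...
		/* Drop the reference taken at register time. */
		kvm_put_kvm(kvm);
	}

So as long as the vGPU has its notifier registered, i.e. between
intel_vgpu_open_device() and intel_vgpu_close_device(), the cached
vgpu->kvm pointer remains valid.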

This will allow releasing ->kvm from the vfio_device right after the
first open() call, so that devices that don't use kvm no longer keep it
alive.

Devices that do use kvm (like intel_vgpu) will be expected to manage
the lifetime of the kvm struct themselves.
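
For illustration only (this is not taken from patch 2/2): a device that
needs kvm beyond open_device() would pin it with the existing KVM
refcount helpers, along the lines of the hypothetical driver below:

	/* Hypothetical driver; foo_device/vfio_dev_to_foo() are made up. */
	static int foo_open_device(struct vfio_device *vfio_dev)
	{
		struct foo_device *foo = vfio_dev_to_foo(vfio_dev);

		/* Take our own reference; vfio_device may drop its own early. */
		if (!vfio_dev->kvm || !kvm_get_kvm_safe(vfio_dev->kvm))
			return -ESRCH;

		foo->kvm = vfio_dev->kvm;
		return 0;
	}

	static void foo_close_device(struct vfio_device *vfio_dev)
	{
		struct foo_device *foo = vfio_dev_to_foo(vfio_dev);

		/* Release the reference taken in foo_open_device(). */
		kvm_put_kvm(foo->kvm);
		foo->kvm = NULL;
	}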

Fixes: 2b48f52f2bff ("vfio: fix deadlock between group lock and kvm lock")
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
---
 drivers/gpu/drm/i915/gvt/gvt.h   |  1 +
 drivers/gpu/drm/i915/gvt/kvmgt.c | 14 +++++++-------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2c95aeef4e41..6c62467df22c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -232,6 +232,7 @@ struct intel_vgpu {
 	unsigned long nr_cache_entries;
 	struct mutex cache_lock;
 
+	struct kvm *kvm;
 	struct kvm_page_track_notifier_node track_node;
 #define NR_BKT (1 << 18)
 	struct hlist_head ptable[NR_BKT];
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index b27ff77bfb50..cf418e2c560d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/kthread.h>
+#include <linux/kvm_host.h>
 #include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -649,7 +650,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
 		if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
 			continue;
 
-		if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
+		if (vgpu->kvm == itr->kvm) {
 			ret = true;
 			goto out;
 		}
@@ -664,13 +665,13 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 	int ret;
 
+	vgpu->kvm = vgpu->vfio_device.kvm;
 	if (__kvmgt_vgpu_exist(vgpu))
 		return -EEXIST;
 
 	vgpu->track_node.track_write = kvmgt_page_track_write;
 	vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
-	ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
-					       &vgpu->track_node);
+	ret = kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
 	if (ret) {
 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
 		return ret;
@@ -707,8 +708,7 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 
 	debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
 
-	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
-					   &vgpu->track_node);
+	kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
 
 	kvmgt_protect_table_destroy(vgpu);
 	gvt_cache_destroy(vgpu);
@@ -1560,7 +1560,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 	if (kvmgt_gfn_is_write_protected(info, gfn))
 		return 0;
 
-	r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn);
+	r = kvm_write_track_add_gfn(info->kvm, gfn);
 	if (r)
 		return r;
 
@@ -1578,7 +1578,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
 	if (!kvmgt_gfn_is_write_protected(info, gfn))
 		return 0;
 
-	r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn);
+	r = kvm_write_track_remove_gfn(info->kvm, gfn);
 	if (r)
 		return r;
 
-- 
2.45.3


