[iommufd PATCH v2 1/2] i915/gvt: Move gvt mapping cache initialization to vGPU creation
Yi Liu
yi.l.liu at intel.com
Tue Nov 29 09:35:34 UTC 2022
vfio container registers the .dma_unmap() callback after the device is
opened, so it is fine for mdev drivers to initialize their internal
mapping cache in .open_device(). See vfio_device_container_register().

With iommufd, however, an access ops with an unmap callback is
registered when the device is bound to iommufd, which happens before
.open_device() is called. This means gvt's .dma_unmap() could be
called before its internal mapping cache is initialized.

Fix this by moving the gvt mapping cache initialization to vGPU
creation. While at it, also move the ptable initialization there.

Reviewed-by: Zhi Wang <zhi.a.wang at intel.com>
Signed-off-by: Yi Liu <yi.l.liu at intel.com>
---
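A minimal userspace sketch of the ordering hazard described above; the
names here (toy_vgpu, toy_dma_unmap) are illustrative stand-ins, not
the actual kernel code paths:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the vGPU state guarded by the mapping cache. */
struct toy_vgpu {
	bool cache_ready;	/* models gfn_cache/dma_addr_cache init */
};

/* Stand-in for gvt's .dma_unmap() / the iommufd unmap access op. */
static void toy_dma_unmap(struct toy_vgpu *vgpu)
{
	if (!vgpu->cache_ready) {
		puts("unmap hit an uninitialized cache (the bug)");
		return;
	}
	puts("unmap walks an initialized cache, OK");
}

int main(void)
{
	struct toy_vgpu vgpu = { .cache_ready = false };

	/*
	 * With a vfio container, the callback was only registered after
	 * .open_device() had initialized the cache. With iommufd the
	 * callback is registered at bind time, before .open_device(),
	 * so it may fire first:
	 */
	toy_dma_unmap(&vgpu);

	/* The fix: initialize the cache at vGPU creation instead. */
	vgpu.cache_ready = true;
	toy_dma_unmap(&vgpu);
	return 0;
}
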
drivers/gpu/drm/i915/gvt/gvt.h | 2 ++
drivers/gpu/drm/i915/gvt/kvmgt.c | 7 ++-----
drivers/gpu/drm/i915/gvt/vgpu.c | 2 ++
3 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index dbf8d7470b2c..a3a7e16078ba 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -754,6 +754,8 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
+void gvt_cache_init(struct intel_vgpu *vgpu);
+void kvmgt_protect_table_init(struct intel_vgpu *info);
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 7a45e5360caf..a9e4eda94057 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -322,7 +322,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
}
}
-static void gvt_cache_init(struct intel_vgpu *vgpu)
+void gvt_cache_init(struct intel_vgpu *vgpu)
{
vgpu->gfn_cache = RB_ROOT;
vgpu->dma_addr_cache = RB_ROOT;
@@ -330,7 +330,7 @@ static void gvt_cache_init(struct intel_vgpu *vgpu)
mutex_init(&vgpu->cache_lock);
}
-static void kvmgt_protect_table_init(struct intel_vgpu *info)
+void kvmgt_protect_table_init(struct intel_vgpu *info)
{
hash_init(info->ptable);
}
@@ -671,9 +671,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
vgpu->attached = true;
- kvmgt_protect_table_init(vgpu);
- gvt_cache_init(vgpu);
-
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 56c71474008a..036e1a72a26b 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -382,6 +382,8 @@ int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
intel_gvt_update_reg_whitelist(vgpu);
mutex_unlock(&gvt->lock);
+ kvmgt_protect_table_init(vgpu);
+ gvt_cache_init(vgpu);
return 0;
out_clean_sched_policy:
--
2.34.1