[Intel-gfx] [RFC PATCH v2 10/10] KVM: SVM: allow to avoid not needed updates to is_running
Maxim Levitsky
mlevitsk at redhat.com
Thu Apr 21 05:12:44 UTC 2022
Optionally allow KVM to skip updates of the AVIC is_running bit
when they are not functionally needed, which is only when a vCPU
halts or is in guest mode.

Security-wise this means that when a vCPU is scheduled out, other
vCPUs can still send doorbell messages to the physical CPU on
which that vCPU last ran.

A malicious guest that exploits this can slow down the victim CPU
by about 40% in my testing, so the relaxed mode should only be
enabled when physical CPUs are not shared among guests.
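
For context, these are the two fields of the 64-bit AVIC physical
ID table entry that this patch reads and writes: a sender delivers
a doorbell to host_physical_id only while is_running is set, and
otherwise falls back to the slow path.  The bit positions below
are my assumption, not taken from this series:

	/* rough sketch only, not part of this patch */
	#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)	/* bits 7:0 */
	#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)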
The new module parameter is avic_doorbell_strict and it defaults
to true; setting it to false enables the relaxed, non-strict mode.
Signed-off-by: Maxim Levitsky <mlevitsk at redhat.com>
---
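
Note: avic_doorbell_strict is 0444, so the relaxed mode has to be
selected at module load time.  A minimal sketch of how that could
look on a host (assuming the usual kvm_amd module; the avic
parameter already exists):

	# illustration only: enable AVIC and opt out of strict
	# doorbell tracking
	modprobe kvm_amd avic=1 avic_doorbell_strict=0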
arch/x86/kvm/svm/avic.c | 19 ++++++++++++-------
arch/x86/kvm/svm/svm.c | 19 ++++++++++++++-----
arch/x86/kvm/svm/svm.h | 1 +
3 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 9176c35662ada..1bfe58ee961b2 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -1641,7 +1641,7 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)

 void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	u64 entry;
+	u64 old_entry, new_entry;
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
 	struct vcpu_svm *svm = to_svm(vcpu);

@@ -1660,14 +1660,16 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (kvm_vcpu_is_blocking(vcpu))
 		return;

-	entry = READ_ONCE(*(svm->avic_physical_id_cache));
-	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+	old_entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	new_entry = old_entry;

-	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
-	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
-	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	new_entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+	new_entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
+	new_entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+
+	if (old_entry != new_entry)
+		WRITE_ONCE(*(svm->avic_physical_id_cache), new_entry);

-	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 }

@@ -1777,6 +1779,9 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)

 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
+	if (!avic_doorbell_strict)
+		__nested_avic_put(vcpu);
+
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 3d9ab1e7b2b52..7e79fefc81650 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -190,6 +190,10 @@ module_param(avic, bool, 0444);
 static bool force_avic;
 module_param_unsafe(force_avic, bool, 0444);

+bool avic_doorbell_strict = true;
+module_param(avic_doorbell_strict, bool, 0444);
+
+
 bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);

@@ -1395,16 +1399,21 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

 	if (kvm_vcpu_apicv_active(vcpu))
 		__avic_vcpu_load(vcpu, cpu);
-
 	__nested_avic_load(vcpu, cpu);
 }

 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	if (kvm_vcpu_apicv_active(vcpu))
-		__avic_vcpu_put(vcpu);
-
-	__nested_avic_put(vcpu);
+	/*
+	 * Forbid AVIC's peers from sending interrupts to this CPU,
+	 * unless we are in non-strict mode, in which case this is
+	 * done only when this vCPU blocks.
+	 */
+	if (avic_doorbell_strict) {
+		if (kvm_vcpu_apicv_active(vcpu))
+			__avic_vcpu_put(vcpu);
+		__nested_avic_put(vcpu);
+	}

 	svm_prepare_host_switch(vcpu);

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7d1a5028750e6..7139bbb534f9e 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -36,6 +36,7 @@ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;
 extern int vgif;
 extern bool intercept_smi;
+extern bool avic_doorbell_strict;

 /*
  * Clean bits in VMCB.
--
2.26.3