[Intel-gfx] [PATCH 23/30] KVM: x86: nSVM: implement nested LBR virtualization
Maxim Levitsky
mlevitsk@redhat.com
Mon Feb 7 15:28:40 UTC 2022
Implement nested LBR virtualization: when the LBRV feature is exposed
to L1 and L1 sets LBR_CTL_ENABLE_MASK in vmcb12's virt_ext, copy the
LBR registers from vmcb12 into vmcb02 on nested VMRUN and back into
vmcb12 on nested VMEXIT, and make svm_update_lbrv() honor the nested
enable bit. Also advertise X86_FEATURE_LBRV to the guest when the
lbrv module parameter is set.
This was tested with a kvm-unit-test that was developed
for this purpose.
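In short, LBR state is enabled if the active vmcb's DEBUGCTL has the
LBR bit set, or if the vCPU is in guest mode and L1, with LBRV exposed
to it, set the LBR enable bit in vmcb12's virt_ext. The following is a
stand-alone user-space sketch of that decision, not kernel code: the
struct, its field names, want_lbrv() and main() are illustrative
stand-ins, the masks mirror the kernel's bit-0 definitions, and the
sketch assumes enable_lbrv is derived from the guest's DEBUGCTL LBR
bit as svm_update_lbrv() derives it.
/* Sketch only: models the decision this patch adds to svm_update_lbrv(). */
#include <stdbool.h>
#include <stdio.h>
#define LBR_CTL_ENABLE_MASK  (1ULL << 0) /* VMCB virt_ext: LBR virtualization enable */
#define DEBUGCTLMSR_LBR      (1ULL << 0) /* DEBUGCTL MSR: LBR recording enable */
struct vcpu_sketch {
	bool is_guest_mode;                 /* vCPU is currently running L2 */
	bool lbrv_enabled;                  /* LBRV exposed to L1 via CPUID */
	unsigned long long dbgctl;          /* active vmcb save.dbgctl */
	unsigned long long nested_virt_ext; /* cached vmcb12 virt_ext */
};
static bool want_lbrv(const struct vcpu_sketch *v)
{
	/* The guest enabled LBR recording through DEBUGCTL. */
	bool enable = v->dbgctl & DEBUGCTLMSR_LBR;
	/* New with this patch: honor L1's LBR_CTL_ENABLE for L2. */
	if (v->is_guest_mode && v->lbrv_enabled &&
	    (v->nested_virt_ext & LBR_CTL_ENABLE_MASK))
		enable = true;
	return enable;
}
int main(void)
{
	struct vcpu_sketch v = {
		.is_guest_mode = true,
		.lbrv_enabled = true,
		.nested_virt_ext = LBR_CTL_ENABLE_MASK,
	};
	printf("LBR virtualization: %s\n", want_lbrv(&v) ? "on" : "off");
	return 0;
}
When the result changes, svm_update_lbrv() switches the actual LBR
state; in the nested paths in the diff below this is paired with
svm_copy_lbrs() calls that move the LBR registers between
vmcb01/vmcb02 and vmcb12.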
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++--
arch/x86/kvm/svm/svm.c | 8 ++++++++
arch/x86/kvm/svm/svm.h | 1 +
3 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9f7bc7db08dd3..4a228a76b27d7 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -536,8 +536,19 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
vmcb_mark_dirty(svm->vmcb, VMCB_DR);
}
- if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK))
+ if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+
+ /*
+ * Copy LBR related registers from vmcb12, but make sure
+ * that we only pick the LBR enable bit from the guest.
+ */
+ svm_copy_lbrs(vmcb12, svm->vmcb);
+ svm->vmcb->save.dbgctl &= DEBUGCTLMSR_LBR;
+ svm_update_lbrv(&svm->vcpu);
+
+ } else if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
+ }
}
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
@@ -592,6 +603,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
svm->vmcb->control.virt_ext = svm->vmcb01.ptr->control.virt_ext &
LBR_CTL_ENABLE_MASK;
+ if (svm->lbrv_enabled)
+ svm->vmcb->control.virt_ext |=
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
nested_svm_transition_tlb_flush(vcpu);
@@ -858,7 +872,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm_switch_vmcb(svm, &svm->vmcb01);
- if (unlikely(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
+ if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+ svm_copy_lbrs(svm->nested.vmcb02.ptr, vmcb12);
+ svm_update_lbrv(vcpu);
+ } else if (unlikely(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
svm_copy_lbrs(svm->nested.vmcb02.ptr, svm->vmcb);
svm_update_lbrv(vcpu);
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 294e016f575a8..76aa6054d9db2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -890,6 +890,10 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
LBR_CTL_ENABLE_MASK);
+ if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled &&
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)))
+ enable_lbrv = true;
+
if (enable_lbrv == current_enable_lbrv)
return;
@@ -3987,6 +3991,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
+ svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
svm_recalc_instruction_intercepts(vcpu, svm);
@@ -4791,6 +4796,9 @@ static __init void svm_set_cpu_caps(void)
if (tsc_scaling)
kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
+ if (lbrv)
+ kvm_cpu_cap_set(X86_FEATURE_LBRV);
+
/* Nested VM can receive #VMEXIT instead of triggering #GP */
kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index b83e06d5d942a..0012ba5affcba 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -220,6 +220,7 @@ struct vcpu_svm {
/* cached guest cpuid flags for faster access */
bool nrips_enabled : 1;
bool tsc_scaling_enabled : 1;
+ bool lbrv_enabled : 1;
u32 ldr_reg;
u32 dfr_reg;
--
2.26.3