On 5/29/20 8:39 AM, Paolo Bonzini wrote:
According to the AMD manual, the effect of turning off EFER.SVME while a
guest is running is undefined.  We make it leave guest mode immediately,
similar to the effect of clearing the VMX bit in MSR_IA32_FEAT_CTL.


I see that svm_set_efer() is called in enter_svm_guest_mode() and nested_svm_vmexit(). In the VMRUN path, we have already checked EFER.SVME in nested_vmcb_checks(), so if it were not set, we would never reach enter_svm_guest_mode(). Is your fix only for the #VMEXIT path, then?


Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
  arch/x86/kvm/svm/nested.c | 16 ++++++++++++++++
  arch/x86/kvm/svm/svm.c    | 10 ++++++++--
  arch/x86/kvm/svm/svm.h    |  1 +
  3 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index bd3a89cd4070..369eca73fe3e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -618,6 +618,22 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        return 0;
  }
+/*
+ * Forcibly leave nested mode in order to be able to reset the VCPU later on.
+ */
+void svm_leave_nested(struct vcpu_svm *svm)
+{
+       if (is_guest_mode(&svm->vcpu)) {
+               struct vmcb *hsave = svm->nested.hsave;
+               struct vmcb *vmcb = svm->vmcb;
+
+               svm->nested.nested_run_pending = 0;
+               leave_guest_mode(&svm->vcpu);
+               copy_vmcb_control_area(&vmcb->control, &hsave->control);
+               nested_svm_uninit_mmu_context(&svm->vcpu);
+       }
+}
+
  static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
  {
        u32 offset, msr, value;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bc08221f6743..b4db9a980469 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -265,6 +265,7 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
  {
+       struct vcpu_svm *svm = to_svm(vcpu);
        vcpu->arch.efer = efer;
if (!npt_enabled) {
@@ -275,8 +276,13 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
                        efer &= ~EFER_LME;
        }
- to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-       mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+       if (!(efer & EFER_SVME)) {
+               svm_leave_nested(svm);
+               svm_set_gif(svm, true);
+       }
+
+       svm->vmcb->save.efer = efer | EFER_SVME;
+       mark_dirty(svm->vmcb, VMCB_CR);
  }
static int is_external_interrupt(u32 info)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be8e830f83fa..6ac4c00a5d82 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -389,6 +389,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                          struct vmcb *nested_vmcb);
+void svm_leave_nested(struct vcpu_svm *svm);
  int nested_svm_vmrun(struct vcpu_svm *svm);
  void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
  int nested_svm_vmexit(struct vcpu_svm *svm);

Reply via email to