Move the kvm_mmu_reset_context calls into nested_svm_init_mmu_context and
nested_svm_uninit_mmu_context, so that the MMU state stays consistent with
vcpu->arch.mmu and vcpu->arch.walk_mmu at the point where they change.
Also remove an unnecessary kvm_mmu_load, which can wait until the first
vcpu_run.
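
For reference, a rough sketch of how the two helpers pair up once this
patch is applied (the earlier nested NPT setup in
nested_svm_init_mmu_context is elided here; see the hunks below for the
exact context):

	static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
	{
		/* ... nested NPT mmu setup elided ... */
		vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
		reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
		vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;

		/* Guest paging mode is active - reset mmu */
		kvm_mmu_reset_context(vcpu);
	}

	static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.mmu = &vcpu->arch.root_mmu;
		vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

		kvm_mmu_reset_context(vcpu);
	}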

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 024e27bebba3..54a3384a60f8 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -90,12 +90,17 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
        reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
        vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
+
+       /* Guest paging mode is active - reset mmu */
+       kvm_mmu_reset_context(vcpu);
 }
 
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+
+       kvm_mmu_reset_context(vcpu);
 }
 
 void recalc_intercepts(struct vcpu_svm *svm)
@@ -277,9 +282,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
        if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
                nested_svm_init_mmu_context(&svm->vcpu);
 
-       /* Guest paging mode is active - reset mmu */
-       kvm_mmu_reset_context(&svm->vcpu);
-
        svm_flush_tlb(&svm->vcpu);
 
        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
@@ -573,8 +575,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        kvm_vcpu_unmap(&svm->vcpu, &map, true);
 
        nested_svm_uninit_mmu_context(&svm->vcpu);
-       kvm_mmu_reset_context(&svm->vcpu);
-       kvm_mmu_load(&svm->vcpu);
 
        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
-- 
2.18.2

