Looking at SVM now...

On 31/01/2018 08:10, KarimAllah Ahmed wrote:
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index f40d0da..89495cf 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -529,6 +529,7 @@ struct svm_cpu_data {
>       struct kvm_ldttss_desc *tss_desc;
>  
>       struct page *save_area;
> +     struct vmcb *current_vmcb;
>  };
>  
>  static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
> @@ -1703,11 +1704,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>       __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
>       kvm_vcpu_uninit(vcpu);
>       kmem_cache_free(kvm_vcpu_cache, svm);
> +     /*
> +      * The vmcb page can be recycled, causing a false negative in
> +      * svm_vcpu_load(). So do a full IBPB now.
> +      */
> +     indirect_branch_prediction_barrier();
>  }
>  
>  static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>       struct vcpu_svm *svm = to_svm(vcpu);
> +     struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
>       int i;
>  
>       if (unlikely(cpu != vcpu->cpu)) {
> @@ -1736,6 +1743,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>       if (static_cpu_has(X86_FEATURE_RDTSCP))
>               wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
>  
> +     if (sd->current_vmcb != svm->vmcb) {
> +             sd->current_vmcb = svm->vmcb;
> +             indirect_branch_prediction_barrier();
> +     }
>       avic_vcpu_load(vcpu, cpu);
>  }
>  
> @@ -3684,6 +3695,22 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>       case MSR_IA32_TSC:
>               kvm_write_tsc(vcpu, msr);
>               break;
> +     case MSR_IA32_PRED_CMD:
> +             if (!msr->host_initiated &&
> +                 !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
> +                     return 1;
> +
> +             if (data & ~PRED_CMD_IBPB)
> +                     return 1;
> +
> +             if (!data)
> +                     break;
> +
> +             wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);

This is missing an

        { .index = MSR_IA32_PRED_CMD,                   .always = false },

in the direct_access_msrs array.  Once you do this, nested is taken care of
automatically.

Likewise for MSR_IA32_SPEC_CTRL in patch 5.

Paolo

> +             if (is_guest_mode(vcpu))
> +                     break;
> +             set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
> +             break;
>       case MSR_STAR:
>               svm->vmcb->save.star = data;
>               break;
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index d46a61b..96e672e 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2285,6 +2285,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>       if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>               per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>               vmcs_load(vmx->loaded_vmcs->vmcs);
> +             indirect_branch_prediction_barrier();
>       }
>  
>       if (!already_loaded) {
> @@ -3342,6 +3343,25 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>       case MSR_IA32_TSC:
>               kvm_write_tsc(vcpu, msr_info);
>               break;
> +     case MSR_IA32_PRED_CMD:
> +             if (!msr_info->host_initiated &&
> +                 !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
> +                     return 1;
> +
> +             if (data & ~PRED_CMD_IBPB)
> +                     return 1;
> +
> +             if (!data)
> +                     break;
> +
> +             wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
> +
> +             if (is_guest_mode(vcpu))
> +                     break;
> +
> +             vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
> +                                           MSR_TYPE_W);
> +             break;
>       case MSR_IA32_CR_PAT:
>               if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
>                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
> @@ -10046,7 +10066,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
>       unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
>  
>       /* This shortcut is ok because we support only x2APIC MSRs so far. */
> -     if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
> +     if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
> +         !to_vmx(vcpu)->save_spec_ctrl_on_exit)
>               return false;
>  
>       page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
> @@ -10079,6 +10100,14 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
>                               MSR_TYPE_W);
>               }
>       }
> +
> +     if (to_vmx(vcpu)->save_spec_ctrl_on_exit) {
> +             nested_vmx_disable_intercept_for_msr(
> +                             msr_bitmap_l1, msr_bitmap_l0,
> +                             MSR_IA32_PRED_CMD,
> +                             MSR_TYPE_R);
> +     }
> +
>       kunmap(page);
>       kvm_release_page_clean(page);
>  
> 

Reply via email to