In the broadcast case, the current implementation fills the MC MSRs only on vcpu0 and leaves the MC MSRs on all other vcpus empty. When the guest reads 0 from MSR_IA32_MCG_STATUS on vcpuN (N > 0), it may conclude that execution on that vcpu cannot be recovered and panic, even though the MSR_IA32_MCG_STATUS value filled on vcpu0 may indicate that the injected vMCE is actually recoverable. To avoid such an unnecessary guest panic, set MSR_IA32_MCG_STATUS on vcpuN (N > 0) to MCG_STATUS_MCIP | MCG_STATUS_RIPV.
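As a rough illustration of the intended per-vcpu fill policy, here is a minimal standalone C sketch (the struct, constants and function names below are simplified stand-ins for this illustration, not Xen's own code): vcpu0 receives the real global status, while every other vcpu gets MCG_STATUS_MCIP | MCG_STATUS_RIPV so the guest sees a valid, restartable context there.

/*
 * Simplified model of the broadcast fill policy described above.
 * All types and helpers here are illustrative stand-ins, not Xen's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MCG_STATUS_RIPV (1ULL << 0)   /* restart IP valid */
#define MCG_STATUS_MCIP (1ULL << 2)   /* machine check in progress */

struct toy_vcpu {
    uint64_t mcg_status;
};

/* Fill MCG_STATUS on every vcpu of a guest with nr_vcpus vcpus. */
static void toy_fill_vmsr_data(struct toy_vcpu *vcpus, unsigned int nr_vcpus,
                               uint64_t gstatus, bool broadcast)
{
    vcpus[0].mcg_status = gstatus;      /* vcpu0 gets the real status */

    if ( !broadcast )
        return;

    for ( unsigned int i = 1; i < nr_vcpus; i++ )
        /* Other vcpus: MCE in progress, but execution is restartable. */
        vcpus[i].mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
}

int main(void)
{
    struct toy_vcpu vcpus[4] = { { 0 } };

    /* Pretend the hardware reported MCIP|RIPV in the global status. */
    toy_fill_vmsr_data(vcpus, 4, MCG_STATUS_MCIP | MCG_STATUS_RIPV, true);

    for ( unsigned int i = 0; i < 4; i++ )
        printf("vcpu%u: MCG_STATUS = %#llx\n", i,
               (unsigned long long)vcpus[i].mcg_status);

    return 0;
}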
Signed-off-by: Haozhong Zhang <haozhong.zh...@intel.com>
---
Cc: Christoph Egger <cheg...@amazon.de>
Cc: Liu Jinsong <jinsong....@alibaba-inc.com>
Cc: Jan Beulich <jbeul...@suse.com>
Cc: Andrew Cooper <andrew.coop...@citrix.com>
---
 xen/arch/x86/cpu/mcheck/mcaction.c | 14 ++++----
 xen/arch/x86/cpu/mcheck/vmce.c     | 67 +++++++++++++++++++++++++-------------
 xen/arch/x86/cpu/mcheck/vmce.h     |  2 +-
 3 files changed, 53 insertions(+), 30 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
index cc90e7c..8b2b834 100644
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -88,21 +88,21 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
                 goto vmce_failed;
             }
 
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+                vmce_vcpuid = VMCE_INJECT_BROADCAST;
+            else
+                vmce_vcpuid = global->mc_vcpuid;
+
             bank->mc_addr = gfn << PAGE_SHIFT |
               (bank->mc_addr & (PAGE_SIZE -1 ));
-            if ( fill_vmsr_data(bank, d,
-                                global->mc_gstatus) == -1 )
+            if ( fill_vmsr_data(bank, d, global->mc_gstatus,
+                                vmce_vcpuid == VMCE_INJECT_BROADCAST) == -1 )
             {
                 mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
                            "failed\n", bank->mc_domid);
                 goto vmce_failed;
             }
 
-            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
-                vmce_vcpuid = VMCE_INJECT_BROADCAST;
-            else
-                vmce_vcpuid = global->mc_vcpuid;
-
             /* We will inject vMCE to DOMU*/
             if ( inject_vmce(d, vmce_vcpuid) < 0 )
             {
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index d83a3f2..456d6f3 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -386,36 +386,59 @@ int inject_vmce(struct domain *d, int vcpu)
     return ret;
 }
 
+static int vcpu_fill_mc_msrs(struct vcpu *v, uint64_t mcg_status,
+                             uint64_t mci_status, uint64_t mci_addr,
+                             uint64_t mci_misc)
+{
+    if ( v->arch.vmce.mcg_status & MCG_STATUS_MCIP )
+    {
+        mce_printk(MCE_QUIET, "MCE: %pv: guest has not handled previous"
+                   " vMCE yet!\n", v);
+        return -EBUSY;
+    }
+
+    spin_lock(&v->arch.vmce.lock);
+
+    v->arch.vmce.mcg_status = mcg_status;
+    /*
+     * 1. Skip bank 0 to avoid 'bank 0 quirk' of old processors
+     * 2. Filter MCi_STATUS MSCOD model specific error code to guest
+     */
+    v->arch.vmce.bank[1].mci_status = mci_status & MCi_STATUS_MSCOD_MASK;
+    v->arch.vmce.bank[1].mci_addr = mci_addr;
+    v->arch.vmce.bank[1].mci_misc = mci_misc;
+
+    spin_unlock(&v->arch.vmce.lock);
+
+    return 0;
+}
+
 int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
-                   uint64_t gstatus)
+                   uint64_t gstatus, bool broadcast)
 {
     struct vcpu *v = d->vcpu[0];
+    int ret;
 
-    if ( mc_bank->mc_domid != (uint16_t)~0 )
-    {
-        if ( v->arch.vmce.mcg_status & MCG_STATUS_MCIP )
-        {
-            mce_printk(MCE_QUIET, "MCE: guest has not handled previous"
-                       " vMCE yet!\n");
-            return -1;
-        }
-
-        spin_lock(&v->arch.vmce.lock);
+    if ( mc_bank->mc_domid == (uint16_t)~0 )
+        return -EINVAL;
 
-        v->arch.vmce.mcg_status = gstatus;
-        /*
-         * 1. Skip bank 0 to avoid 'bank 0 quirk' of old processors
-         * 2. Filter MCi_STATUS MSCOD model specific error code to guest
-         */
-        v->arch.vmce.bank[1].mci_status = mc_bank->mc_status &
-                                          MCi_STATUS_MSCOD_MASK;
-        v->arch.vmce.bank[1].mci_addr = mc_bank->mc_addr;
-        v->arch.vmce.bank[1].mci_misc = mc_bank->mc_misc;
+    ret = vcpu_fill_mc_msrs(v, gstatus, mc_bank->mc_status,
+                            mc_bank->mc_addr, mc_bank->mc_misc);
+    if ( ret || !broadcast )
+        goto out;
 
-        spin_unlock(&v->arch.vmce.lock);
+    for_each_vcpu ( d, v )
+    {
+        if ( v == d->vcpu[0] )
+            continue;
+        ret = vcpu_fill_mc_msrs(v, MCG_STATUS_MCIP | MCG_STATUS_RIPV,
+                                0, 0, 0);
+        if ( ret )
+            break;
     }
 
-    return 0;
+ out:
+    return ret;
 }
 
 /* It's said some ram is setup as mmio_direct for UC cache attribute */
diff --git a/xen/arch/x86/cpu/mcheck/vmce.h b/xen/arch/x86/cpu/mcheck/vmce.h
index 163ce3c..74f6381 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.h
+++ b/xen/arch/x86/cpu/mcheck/vmce.h
@@ -17,7 +17,7 @@ int vmce_amd_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
 int vmce_amd_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
 
 int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
-                   uint64_t gstatus);
+                   uint64_t gstatus, bool broadcast);
 
 #define VMCE_INJECT_BROADCAST (-1)
 int inject_vmce(struct domain *d, int vcpu);
-- 
2.10.1