Use the per-domain masks in preference to cpumask_defaults on context switch. HVM domains must not have feature masking applied (to avoid interfering with cpuid invocations from within the guest), so always lazily context switch to the host default for them.
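For reference, the selection rule both vendor paths now share boils down to the standalone sketch below. It is illustrative only and not part of the patch: the struct layout, the is_pv/pv_masks fields, select_masks() and main() are hypothetical stand-ins for is_pv_domain(), d->arch.pv_domain.masks and the open-coded ternary in {amd,intel}_ctxt_switch_levelling().

    /*
     * Illustrative sketch of the mask-selection rule (not Xen code).
     * PV domains which own a private mask set use it; everything else
     * (idle, HVM, or a PV domain without masks) falls back to the
     * host-wide defaults, which the lazy MSR writes then restore.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct cpumasks {
        unsigned long long cpuid_1cd, cpuid_e1cd;   /* illustrative fields */
    };

    struct domain {
        bool is_pv;                  /* stands in for is_pv_domain() */
        struct cpumasks *pv_masks;   /* stands in for arch.pv_domain.masks */
    };

    static struct cpumasks cpumask_defaults;        /* host default masks */

    static const struct cpumasks *select_masks(const struct domain *nextd)
    {
        return (nextd && nextd->is_pv && nextd->pv_masks)
               ? nextd->pv_masks : &cpumask_defaults;
    }

    int main(void)
    {
        struct cpumasks pv_masks = { 0x1234, 0x5678 };
        struct domain pv_dom  = { true,  &pv_masks };
        struct domain hvm_dom = { false, NULL };

        printf("PV uses defaults?   %d\n", select_masks(&pv_dom)  == &cpumask_defaults);
        printf("HVM uses defaults?  %d\n", select_masks(&hvm_dom) == &cpumask_defaults);
        printf("idle uses defaults? %d\n", select_masks(NULL)     == &cpumask_defaults);
        return 0;
    }

Built as an ordinary C program, this prints 0/1/1: only the PV domain with allocated masks diverges from the host defaults, matching the behaviour described above.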
Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
CC: Jan Beulich <jbeul...@suse.com>
---
 xen/arch/x86/cpu/amd.c       | 4 +++-
 xen/arch/x86/cpu/intel.c     | 5 ++++-
 xen/arch/x86/domain.c        | 9 +++++++++
 xen/include/asm-x86/domain.h | 2 ++
 4 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index cc1344a..a925614 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -203,7 +203,9 @@ static void __init noinline probe_masking_msrs(void)
 void amd_ctxt_switch_levelling(const struct domain *nextd)
 {
 	struct cpumasks *these_masks = &this_cpu(cpumasks);
-	const struct cpumasks *masks = &cpumask_defaults;
+	const struct cpumasks *masks =
+		(nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.masks)
+		? nextd->arch.pv_domain.masks : &cpumask_defaults;
 
 #define LAZY(cap, msr, field) \
 ({ \
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 87c66d2..5d2ce3f 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -143,7 +143,7 @@ static void __init probe_masking_msrs(void)
 void intel_ctxt_switch_levelling(const struct domain *nextd)
 {
 	struct cpumasks *these_masks = &this_cpu(cpumasks);
-	const struct cpumasks *masks = &cpumask_defaults;
+	const struct cpumasks *masks;
 
 	if (cpu_has_cpuid_faulting) {
 		set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
@@ -152,6 +152,9 @@ void intel_ctxt_switch_levelling(const struct domain *nextd)
 		return;
 	}
 
+	masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.masks)
+		? nextd->arch.pv_domain.masks : &cpumask_defaults;
+
 #define LAZY(msr, field) \
 ({ \
 	if ( msr && (these_masks->field != masks->field) ) \
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e9f1af1..84fd74b 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -578,6 +578,12 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
             goto fail;
         clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
+        d->arch.pv_domain.masks = xmalloc(struct cpumasks);
+        if ( !d->arch.pv_domain.masks )
+            goto fail;
+        memcpy(d->arch.pv_domain.masks, &cpumask_defaults,
+               sizeof(*d->arch.pv_domain.masks));
+
         rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
                                       GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
                                       NULL, NULL);
@@ -690,7 +696,10 @@ void arch_domain_destroy(struct domain *d)
     free_perdomain_mappings(d);
 
     if ( is_pv_domain(d) )
+    {
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+        xfree(d->arch.pv_domain.masks);
+    }
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 0fce09e..dbed9ed 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -249,6 +249,8 @@ struct pv_domain
 
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
+
+    struct cpumasks *masks;
 };
 
 struct monitor_write_data {
-- 
2.1.4