First, if XSAVE is available in hardware but not visible to the guest, the dynamic leaves shouldn't be filled in.
Second, the comment concerning XSS state is wrong. VT-x doesn't manage host/guest state automatically, but there is provision for "host only" bits to be set, so the implications are still accurate. Introduce xstate_compressed_size() to mirror the uncompressed one. Cross check it at boot. Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com> Reviewed-by: Jan Beulich <jbeul...@suse.com> --- CC: Jan Beulich <jbeul...@suse.com> CC: Roger Pau Monné <roger....@citrix.com> CC: Oleksii Kurochko <oleksii.kuroc...@gmail.com> v3: * Adjust commit message about !XSAVE guests * Rebase over boot time cross check * Use raw policy v4: * Drop the TODO comment. The CPUID path is always liable to pass 0 here. * ASSERT() a nonzero c->size like we do in the uncompressed helper. --- xen/arch/x86/cpuid.c | 24 +++++++-------------- xen/arch/x86/include/asm/xstate.h | 1 + xen/arch/x86/xstate.c | 36 +++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 16 deletions(-) diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c index 7a38e032146a..a822e80c7ea7 100644 --- a/xen/arch/x86/cpuid.c +++ b/xen/arch/x86/cpuid.c @@ -330,23 +330,15 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, case XSTATE_CPUID: switch ( subleaf ) { - case 1: - if ( !p->xstate.xsavec && !p->xstate.xsaves ) - break; - - /* - * TODO: Figure out what to do for XSS state. VT-x manages host - * vs guest MSR_XSS automatically, so as soon as we start - * supporting any XSS states, the wrong XSS will be in context. - */ - BUILD_BUG_ON(XSTATE_XSAVES_ONLY != 0); - fallthrough; case 0: - /* - * Read CPUID[0xD,0/1].EBX from hardware. They vary with enabled - * XSTATE, and appropriate XCR0|XSS are in context. 
- */ - res->b = cpuid_count_ebx(leaf, subleaf); + if ( p->basic.xsave ) + res->b = xstate_uncompressed_size(v->arch.xcr0); + break; + + case 1: + if ( p->xstate.xsavec ) + res->b = xstate_compressed_size(v->arch.xcr0 | + v->arch.msrs->xss.raw); + break; } break; diff --git a/xen/arch/x86/include/asm/xstate.h b/xen/arch/x86/include/asm/xstate.h index bfb66dd766b6..da1d89d2f416 100644 --- a/xen/arch/x86/include/asm/xstate.h +++ b/xen/arch/x86/include/asm/xstate.h @@ -109,6 +109,7 @@ void xstate_free_save_area(struct vcpu *v); int xstate_alloc_save_area(struct vcpu *v); void xstate_init(struct cpuinfo_x86 *c); unsigned int xstate_uncompressed_size(uint64_t xcr0); +unsigned int xstate_compressed_size(uint64_t xstates); static inline uint64_t xgetbv(unsigned int index) { diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c index 8edc4792a8fd..31bf2dc95f57 100644 --- a/xen/arch/x86/xstate.c +++ b/xen/arch/x86/xstate.c @@ -619,6 +619,36 @@ unsigned int xstate_uncompressed_size(uint64_t xcr0) return size; } +unsigned int xstate_compressed_size(uint64_t xstates) +{ + unsigned int i, size = XSTATE_AREA_MIN_SIZE; + + if ( xstates == 0 ) + return 0; + + if ( xstates <= (X86_XCR0_SSE | X86_XCR0_FP) ) + return size; + + /* + * For the compressed size, every non-legacy component matters. Some + * components require aligning to 64 first. 
+ */ + xstates &= ~(X86_XCR0_SSE | X86_XCR0_FP); + for_each_set_bit ( i, &xstates, 63 ) + { + const struct xstate_component *c = &raw_cpu_policy.xstate.comp[i]; + + ASSERT(c->size); + + if ( c->align ) + size = ROUNDUP(size, 64); + + size += c->size; + } + + return size; +} + struct xcheck_state { uint64_t states; uint32_t uncomp_size; @@ -681,6 +711,12 @@ static void __init check_new_xstate(struct xcheck_state *s, uint64_t new) s->states, &new, hw_size, s->comp_size); s->comp_size = hw_size; + + xen_size = xstate_compressed_size(s->states); + + if ( xen_size != hw_size ) + panic("XSTATE 0x%016"PRIx64", compressed hw size %#x != xen size %#x\n", + s->states, hw_size, xen_size); } else if ( hw_size ) /* Compressed size reported, but no XSAVEC ? */ { -- 2.39.2