Migration from KVM to TCG is broken anyway. The changing offsets do break
migration of a KVM guest from Intel to AMD or vice versa, because of the
difference in CPUID. That, however, is not changed by this patch.

Paolo

Il mer 7 lug 2021, 03:09 Richard Henderson <richard.hender...@linaro.org>
ha scritto:

> On 7/5/21 3:46 AM, David Edmondson wrote:
> > Given that TCG is now the only consumer of X86XSaveArea, move the
> > structure definition and associated offset declarations and checks to a
> > TCG specific header.
> >
> > Signed-off-by: David Edmondson <david.edmond...@oracle.com>
> > ---
> >   target/i386/cpu.h            | 57 ------------------------------------
> >   target/i386/tcg/fpu_helper.c |  1 +
> >   target/i386/tcg/tcg-cpu.h    | 57 ++++++++++++++++++++++++++++++++++++
> >   3 files changed, 58 insertions(+), 57 deletions(-)
> >
> > diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> > index 96b672f8bd..0f7ddbfeae 100644
> > --- a/target/i386/cpu.h
> > +++ b/target/i386/cpu.h
> > @@ -1305,48 +1305,6 @@ typedef struct XSavePKRU {
> >       uint32_t padding;
> >   } XSavePKRU;
> >
> > -#define XSAVE_FCW_FSW_OFFSET    0x000
> > -#define XSAVE_FTW_FOP_OFFSET    0x004
> > -#define XSAVE_CWD_RIP_OFFSET    0x008
> > -#define XSAVE_CWD_RDP_OFFSET    0x010
> > -#define XSAVE_MXCSR_OFFSET      0x018
> > -#define XSAVE_ST_SPACE_OFFSET   0x020
> > -#define XSAVE_XMM_SPACE_OFFSET  0x0a0
> > -#define XSAVE_XSTATE_BV_OFFSET  0x200
> > -#define XSAVE_AVX_OFFSET        0x240
> > -#define XSAVE_BNDREG_OFFSET     0x3c0
> > -#define XSAVE_BNDCSR_OFFSET     0x400
> > -#define XSAVE_OPMASK_OFFSET     0x440
> > -#define XSAVE_ZMM_HI256_OFFSET  0x480
> > -#define XSAVE_HI16_ZMM_OFFSET   0x680
> > -#define XSAVE_PKRU_OFFSET       0xa80
> > -
> > -typedef struct X86XSaveArea {
> > -    X86LegacyXSaveArea legacy;
> > -    X86XSaveHeader header;
> > -
> > -    /* Extended save areas: */
> > -
> > -    /* AVX State: */
> > -    XSaveAVX avx_state;
> > -
> > -    /* Ensure that XSaveBNDREG is properly aligned. */
> > -    uint8_t padding[XSAVE_BNDREG_OFFSET
> > -                    - sizeof(X86LegacyXSaveArea)
> > -                    - sizeof(X86XSaveHeader)
> > -                    - sizeof(XSaveAVX)];
> > -
> > -    /* MPX State: */
> > -    XSaveBNDREG bndreg_state;
> > -    XSaveBNDCSR bndcsr_state;
> > -    /* AVX-512 State: */
> > -    XSaveOpmask opmask_state;
> > -    XSaveZMM_Hi256 zmm_hi256_state;
> > -    XSaveHi16_ZMM hi16_zmm_state;
> > -    /* PKRU State: */
> > -    XSavePKRU pkru_state;
> > -} X86XSaveArea;
> > -
> >   QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
> >   QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
> >   QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
> > @@ -1355,21 +1313,6 @@ QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) !=
> 0x200);
> >   QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
> >   QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
> >
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fcw) !=
> XSAVE_FCW_FSW_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.ftw) !=
> XSAVE_FTW_FOP_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpip) !=
> XSAVE_CWD_RIP_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpdp) !=
> XSAVE_CWD_RDP_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.mxcsr) !=
> XSAVE_MXCSR_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpregs) !=
> XSAVE_ST_SPACE_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.xmm_regs) !=
> XSAVE_XMM_SPACE_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) !=
> XSAVE_AVX_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) !=
> XSAVE_BNDREG_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) !=
> XSAVE_BNDCSR_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) !=
> XSAVE_OPMASK_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) !=
> XSAVE_ZMM_HI256_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) !=
> XSAVE_HI16_ZMM_OFFSET);
> > -QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) !=
> XSAVE_PKRU_OFFSET);
> > -
> >   typedef struct ExtSaveArea {
> >       uint32_t feature, bits;
> >       uint32_t offset, size;
> > diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
> > index 4e11965067..74bbe94b80 100644
> > --- a/target/i386/tcg/fpu_helper.c
> > +++ b/target/i386/tcg/fpu_helper.c
> > @@ -20,6 +20,7 @@
> >   #include "qemu/osdep.h"
> >   #include <math.h>
> >   #include "cpu.h"
> > +#include "tcg-cpu.h"
> >   #include "exec/helper-proto.h"
> >   #include "fpu/softfloat.h"
> >   #include "fpu/softfloat-macros.h"
> > diff --git a/target/i386/tcg/tcg-cpu.h b/target/i386/tcg/tcg-cpu.h
> > index 36bd300af0..53a8494455 100644
> > --- a/target/i386/tcg/tcg-cpu.h
> > +++ b/target/i386/tcg/tcg-cpu.h
> > @@ -19,6 +19,63 @@
> >   #ifndef TCG_CPU_H
> >   #define TCG_CPU_H
> >
> > +#define XSAVE_FCW_FSW_OFFSET    0x000
> > +#define XSAVE_FTW_FOP_OFFSET    0x004
> > +#define XSAVE_CWD_RIP_OFFSET    0x008
> > +#define XSAVE_CWD_RDP_OFFSET    0x010
> > +#define XSAVE_MXCSR_OFFSET      0x018
> > +#define XSAVE_ST_SPACE_OFFSET   0x020
> > +#define XSAVE_XMM_SPACE_OFFSET  0x0a0
> > +#define XSAVE_XSTATE_BV_OFFSET  0x200
> > +#define XSAVE_AVX_OFFSET        0x240
> > +#define XSAVE_BNDREG_OFFSET     0x3c0
> > +#define XSAVE_BNDCSR_OFFSET     0x400
> > +#define XSAVE_OPMASK_OFFSET     0x440
> > +#define XSAVE_ZMM_HI256_OFFSET  0x480
> > +#define XSAVE_HI16_ZMM_OFFSET   0x680
> > +#define XSAVE_PKRU_OFFSET       0xa80
> > +
> > +typedef struct X86XSaveArea {
> > +    X86LegacyXSaveArea legacy;
> > +    X86XSaveHeader header;
> > +
> > +    /* Extended save areas: */
> > +
> > +    /* AVX State: */
> > +    XSaveAVX avx_state;
> > +
> > +    /* Ensure that XSaveBNDREG is properly aligned. */
> > +    uint8_t padding[XSAVE_BNDREG_OFFSET
> > +                    - sizeof(X86LegacyXSaveArea)
> > +                    - sizeof(X86XSaveHeader)
> > +                    - sizeof(XSaveAVX)];
> > +
> > +    /* MPX State: */
> > +    XSaveBNDREG bndreg_state;
> > +    XSaveBNDCSR bndcsr_state;
> > +    /* AVX-512 State: */
> > +    XSaveOpmask opmask_state;
> > +    XSaveZMM_Hi256 zmm_hi256_state;
> > +    XSaveHi16_ZMM hi16_zmm_state;
> > +    /* PKRU State: */
> > +    XSavePKRU pkru_state;
> > +} X86XSaveArea;
> > +
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fcw) !=
> XSAVE_FCW_FSW_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.ftw) !=
> XSAVE_FTW_FOP_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpip) !=
> XSAVE_CWD_RIP_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpdp) !=
> XSAVE_CWD_RDP_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.mxcsr) !=
> XSAVE_MXCSR_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpregs) !=
> XSAVE_ST_SPACE_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.xmm_regs) !=
> XSAVE_XMM_SPACE_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) !=
> XSAVE_AVX_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) !=
> XSAVE_BNDREG_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) !=
> XSAVE_BNDCSR_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) !=
> XSAVE_OPMASK_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) !=
> XSAVE_ZMM_HI256_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) !=
> XSAVE_HI16_ZMM_OFFSET);
> > +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) !=
> XSAVE_PKRU_OFFSET);
>
> My only quibble is that these offsets are otherwise unused.  This just
> becomes validation
> of compiler layout.
>
> I presume that XSAVE_BNDREG_OFFSET is not merely
> ROUND_UP(offsetof(avx_state) +
> sizeof(avx_state), some_pow2)?
>
> Do these offsets need to be migrated?  Otherwise, how can one start a VM
> with KVM and then
> migrate to TCG?  I presume the offsets above are constant for a given CPU,
> and that
> whatever cpu provides different offsets is not supported by tcg?  Given
> the lack of avx,
> that's trivial these days...
>
>
> r~
>
>

Reply via email to