On 05/22/2014 04:11 AM, Tom Musta wrote:
> On 5/21/2014 1:20 AM, Alexey Kardashevskiy wrote:
>> This adds migration support for registers saved before transaction started.
>>
>> Signed-off-by: Alexey Kardashevskiy <a...@ozlabs.ru>
>> ---
>>  target-ppc/cpu.h     | 19 +++++++++++++++++++
>>  target-ppc/kvm.c     | 38 ++++++++++++++++++++++++++++++++++++++
>>  target-ppc/machine.c | 35 +++++++++++++++++++++++++++++++++++
>>  3 files changed, 92 insertions(+)
>>
>> diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
>> index 72ed763..52baf20 100644
>> --- a/target-ppc/cpu.h
>> +++ b/target-ppc/cpu.h
>> @@ -426,6 +426,9 @@ struct ppc_slb_t {
>>  #define MSR_TAG  62 /* Tag-active mode (POWERx ?)                        */
>>  #define MSR_ISF  61 /* Sixty-four-bit interrupt mode on 630              */
>>  #define MSR_SHV  60 /* hypervisor state                           hflags */
>> +#define MSR_TS0  34 /* Transactional state, 2 bits (Book3s)              */
>> +#define MSR_TS1  33
>> +#define MSR_TM   32 /* Transactional Memory Available (Book3s)           */
>>  #define MSR_CM   31 /* Computation mode for BookE                 hflags */
>>  #define MSR_ICM  30 /* Interrupt computation mode for BookE              */
>>  #define MSR_THV  29 /* hypervisor state for 32 bits PowerPC       hflags */
>> @@ -502,6 +505,8 @@ struct ppc_slb_t {
>>  #define msr_pmm  ((env->msr >> MSR_PMM)  & 1)
>>  #define msr_ri   ((env->msr >> MSR_RI)   & 1)
>>  #define msr_le   ((env->msr >> MSR_LE)   & 1)
>> +#define msr_ts   ((env->msr >> MSR_TS1)  & 3)
>> +
>>  /* Hypervisor bit is more specific */
>>  #if defined(TARGET_PPC64)
>>  #define MSR_HVB (1ULL << MSR_SHV)
>> @@ -1081,6 +1086,20 @@ struct CPUPPCState {
>>       */
>>      uint8_t fit_period[4];
>>      uint8_t wdt_period[4];
>> +
>> +    /* Transactional memory state */
>> +    target_ulong tm_gpr[32];
>> +    ppc_avr_t tm_vsr[64];
>> +    uint64_t tm_cr;
>> +    uint64_t tm_lr;
>> +    uint64_t tm_ctr;
>> +    uint64_t tm_fpscr;
>> +    uint64_t tm_amr;
>> +    uint64_t tm_ppr;
>> +    uint64_t tm_vrsave;
>> +    uint32_t tm_vscr;
>> +    uint64_t tm_dscr;
>> +    uint64_t tm_tar;
>>  };
>>
>>  #define SET_FIT_PERIOD(a_, b_, c_, d_)                         \
>> diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
>> index ca31027..6f10654 100644
>> --- a/target-ppc/kvm.c
>> +++ b/target-ppc/kvm.c
>> @@ -861,6 +861,25 @@ int kvm_arch_put_registers(CPUState *cs, int level)
>>      }
>>
>>  #ifdef TARGET_PPC64
>> +    if (msr_ts) {
>> +        for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
>> +            kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
>> +        }
>> +        for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
>> +            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
>> +        }
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
>> +        kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
>> +    }
>> +
>>      if (cap_papr) {
>>          if (kvm_put_vpa(cs) < 0) {
>>              DPRINTF("Warning: Unable to set VPA information to KVM\n");
>> @@ -1087,6 +1106,25 @@ int kvm_arch_get_registers(CPUState *cs)
>>      }
>>
>>  #ifdef TARGET_PPC64
>> +    if (msr_ts) {
>> +        for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
>> +            kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
>> +        }
>> +        for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
>> +            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
>> +        }
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
>> +        kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
>> +    }
>> +
>>      if (cap_papr) {
>>          if (kvm_get_vpa(cs) < 0) {
>>              DPRINTF("Warning: Unable to get VPA information from KVM\n");
>> diff --git a/target-ppc/machine.c b/target-ppc/machine.c
>> index df7cfc5..9c0e697 100644
>> --- a/target-ppc/machine.c
>> +++ b/target-ppc/machine.c
>> @@ -252,6 +252,38 @@ static const VMStateDescription vmstate_vsx = {
>>      },
>>  };
>>
>> +#ifdef TARGET_PPC64
>> +/* Transactional memory state */
>> +static bool tm_needed(void *opaque)
>> +{
>> +    PowerPCCPU *cpu = opaque;
>> +    CPUPPCState *env = &cpu->env;
>> +    return msr_ts;
>> +}
>> +
>> +static const VMStateDescription vmstate_tm = {
>> +    .name = "cpu/tm",
>> +    .version_id = 1,
>> +    .minimum_version_id = 1,
>> +    .minimum_version_id_old = 1,
>> +    .fields = (VMStateField []) {
>> +        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
>> +        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
>> +        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
>> +        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
>> +        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
>> +        VMSTATE_END_OF_LIST()
>> +    },
>> +};
>> +#endif
>> +
>>  static bool sr_needed(void *opaque)
>>  {
>>  #ifdef TARGET_PPC64
>> @@ -522,6 +554,9 @@ const VMStateDescription vmstate_ppc_cpu = {
>>              .needed = sr_needed,
>>          } , {
>>  #ifdef TARGET_PPC64
>> +            .vmsd = &vmstate_tm,
>> +            .needed = tm_needed,
>> +        } , {
>>              .vmsd = &vmstate_slb,
>>              .needed = slb_needed,
>>          } , {
>>
>
> TM is not limited in the ISA to 64-bit implementations. Why restrict
> this to TARGET_PPC64?

The TS/TM bits in the MSR are in the top 32 bits, which are unavailable on a
32-bit machine, and we are emulating a machine here; that is pretty much why.


--
Alexey
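For reference, a minimal standalone sketch of that point (not QEMU code; only
the MSR_TS0/TS1/TM bit numbers are taken from the cpu.h hunk above): a TS
field living at bits 33/34 simply cannot be represented in a 32-bit MSR, so
msr_ts can only ever be non-zero on a 64-bit target.

/*
 * Illustration only: the TS field (bits 33/34) and TM bit (bit 32) are
 * above bit 31, so they are lost when the MSR is truncated to 32 bits.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */
#define MSR_TS1 33
#define MSR_TM  32 /* Transactional Memory Available (Book3s) */

int main(void)
{
    /* Set both TS bits and TM in a 64-bit MSR, purely for illustration */
    uint64_t msr64 = (3ULL << MSR_TS1) | (1ULL << MSR_TM);
    /* A 32-bit MSR view truncates those bits away entirely */
    uint32_t msr32 = (uint32_t)msr64;

    printf("64-bit msr_ts = %d\n", (int)((msr64 >> MSR_TS1) & 3)); /* 3 */
    printf("32-bit MSR    = 0x%08x\n", msr32); /* 0: TS/TM bits are gone */
    return 0;
}

Compiling and running this prints msr_ts = 3 for the 64-bit MSR and 0 for the
truncated 32-bit view, which is the reason the TM state is guarded by
TARGET_PPC64 in the patch.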