The rlwnm instruction is specified in terms of the ROTL32 operation,
which is defined as a left rotation of two concatenated copies of the
least significant 32 bits of the source GPR.

The current implementation is incorrect on 64-bit targets: it rotates a
single copy of the least significant 32 bits and pads the most
significant bits with zeroes. Fix the code to properly implement the
ROTL32 operation.

Signed-off-by: Tom Musta <tommu...@gmail.com>
---
 target-ppc/translate.c | 18 +++++++++---------
 1 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index a27d063..48f13a9 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -1695,7 +1695,7 @@ static void gen_rlwnm(DisasContext *ctx)
     uint32_t mb, me;
     TCGv t0;
 #if defined(TARGET_PPC64)
-    TCGv_i32 t1, t2;
+    TCGv t1;
 #endif
 
     mb = MB(ctx->opcode);
@@ -1703,14 +1703,11 @@ static void gen_rlwnm(DisasContext *ctx)
     t0 = tcg_temp_new();
     tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
 #if defined(TARGET_PPC64)
-    t1 = tcg_temp_new_i32();
-    t2 = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_trunc_i64_i32(t2, t0);
-    tcg_gen_rotl_i32(t1, t1, t2);
-    tcg_gen_extu_i32_i64(t0, t1);
-    tcg_temp_free_i32(t1);
-    tcg_temp_free_i32(t2);
+    t1 = tcg_temp_new_i64();
+    tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
+                        cpu_gpr[rS(ctx->opcode)], 32, 32);
+    tcg_gen_rotl_i64(t0, t1, t0);
+    tcg_temp_free_i64(t1);
 #else
     tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
 #endif
@@ -1721,6 +1718,9 @@ static void gen_rlwnm(DisasContext *ctx)
 #endif
         tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
     } else {
+#if defined(TARGET_PPC64)
+        tcg_gen_andi_tl(t0, t0, MASK(32, 63));
+#endif
         tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
     }
     tcg_temp_free(t0);
--
1.7.1
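
For illustration, a minimal C sketch of the ROTL32 semantics described
above (the rotl32() helper is hypothetical, not part of this patch or
of QEMU; it models the same effect as the tcg_gen_deposit_i64 plus
tcg_gen_rotl_i64 sequence in the patch):

    #include <stdint.h>

    /* Model of ROTL32: build a 64-bit value from two copies of the low
     * 32 bits of rs, then rotate it left by n (0 <= n <= 31). */
    static uint64_t rotl32(uint64_t rs, unsigned n)
    {
        uint64_t lo = rs & 0xffffffffULL;  /* least significant 32 bits */
        uint64_t x  = (lo << 32) | lo;     /* two concatenated copies */
        n &= 0x1f;                         /* rlwnm uses a 5-bit count */
        return n ? (x << n) | (x >> (64 - n)) : x;
    }

rlwnm then ANDs this result with MASK(mb, me). For example,
rotl32(0x00000000deadbeefULL, 8) is 0xadbeefdeadbeefdeULL, and masking
with MASK(32, 63) leaves the expected 32-bit rotation 0xadbeefde in the
low word.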