I.e. gen_push_v, gen_pop_T0 and gen_stack_A0. This further centralizes the handling of segment bases in gen_lea_v_seg.
Signed-off-by: Richard Henderson <r...@twiddle.net> --- target-i386/translate.c | 49 +++++++++++++++---------------------------------- 1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/target-i386/translate.c b/target-i386/translate.c index ec2716d..911f6e8 100644 --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -480,10 +480,9 @@ static inline void gen_jmp_im(target_ulong pc) /* Compute SEG:REG into A0. SEG is selected from the override segment (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to indicate no override. */ -static void gen_lea_v_seg(DisasContext *s, TCGv a0, int def_seg, int ovr_seg) +static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, + int def_seg, int ovr_seg) { - TCGMemOp aflag = s->aflag; - switch (aflag) { #ifdef TARGET_X86_64 case MO_64: @@ -540,12 +539,12 @@ static void gen_lea_v_seg(DisasContext *s, TCGv a0, int def_seg, int ovr_seg) static inline void gen_string_movl_A0_ESI(DisasContext *s) { - gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override); + gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override); } static inline void gen_string_movl_A0_EDI(DisasContext *s) { - gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1); + gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); } static inline void gen_op_movl_T0_Dshift(TCGMemOp ot) @@ -2005,7 +2004,7 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) tcg_abort(); } - gen_lea_v_seg(s, sum, def_seg, ovr_seg); + gen_lea_v_seg(s, s->aflag, sum, def_seg, ovr_seg); } static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) @@ -2068,7 +2067,7 @@ static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) /* used for LEA and MOV AX, mem */ static void gen_add_A0_ds_seg(DisasContext *s) { - gen_lea_v_seg(s, cpu_A0, R_DS, s->override); + gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override); } /* generate modrm memory load or store of 'reg'. 
TMP0 is used if reg == @@ -2305,21 +2304,13 @@ static void gen_push_v(DisasContext *s, TCGv val) if (CODE64(s)) { a_ot = MO_64; - } else if (s->ss32) { - a_ot = MO_32; + } else { + a_ot = s->ss32 ? MO_32 : MO_16; if (s->addseg) { new_esp = cpu_tmp4; tcg_gen_mov_tl(new_esp, cpu_A0); - gen_op_addl_A0_seg(s, R_SS); - } else { - tcg_gen_ext32u_tl(cpu_A0, cpu_A0); } - } else { - a_ot = MO_16; - new_esp = cpu_tmp4; - tcg_gen_ext16u_tl(cpu_A0, cpu_A0); - tcg_gen_mov_tl(new_esp, cpu_A0); - gen_op_addl_A0_seg(s, R_SS); + gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1); } gen_op_st_v(s, d_ot, val, cpu_A0); @@ -2330,37 +2321,27 @@ static void gen_push_v(DisasContext *s, TCGv val) static TCGMemOp gen_pop_T0(DisasContext *s) { TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGv addr = cpu_A0; + TCGv addr; if (CODE64(s)) { addr = cpu_regs[R_ESP]; - } else if (!s->ss32) { - tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]); - gen_op_addl_A0_seg(s, R_SS); - } else if (s->addseg) { - tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]); - gen_op_addl_A0_seg(s, R_SS); } else { - tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]); + gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1); + addr = cpu_A0; } gen_op_ld_v(s, d_ot, cpu_T[0], addr); return d_ot; } -static void gen_pop_update(DisasContext *s, TCGMemOp ot) +static inline void gen_pop_update(DisasContext *s, TCGMemOp ot) { gen_stack_update(s, 1 << ot); } -static void gen_stack_A0(DisasContext *s) +static inline void gen_stack_A0(DisasContext *s) { - gen_op_movl_A0_reg(R_ESP); - if (!s->ss32) - tcg_gen_ext16u_tl(cpu_A0, cpu_A0); - tcg_gen_mov_tl(cpu_T[1], cpu_A0); - if (s->addseg) - gen_op_addl_A0_seg(s, R_SS); + gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1); } /* NOTE: wrap around in 16 bit not fully handled */ -- 1.8.3.1