For A64, any input to an indirect branch can leave the PC misaligned. For A32, many indirect branch paths force the branch target to be aligned, but BXWritePC does not. This includes the BX instruction but also other interworking writes to the PC. Prior to v8, this case is UNPREDICTABLE. With v8, this is CONSTRAINED UNPREDICTABLE and may either raise an exception or force-align the PC.
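As an illustration only (not part of this patch; the names below are made up): a guest can reach the BXWritePC case from plain C, since an indirect call through a function pointer with bit 1 set is typically compiled to a BX/BLX through a register and so stays in ARM state with a misaligned PC. With this change, such a target faults instead of being fetched misaligned; how the fault surfaces to a user-mode process is not shown here.

    #include <stdint.h>

    static void target(void) { }

    int main(void)
    {
        /* Set bit 1 of the target address: a BX/BLX to bits[1:0] == 0b10
         * stays in ARM state with a PC that is not 4-byte aligned.
         */
        uintptr_t misaligned = (uintptr_t)target | 2;
        void (*fn)(void) = (void (*)(void))misaligned;

        fn();   /* indirect branch through a register */
        return 0;
    }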
We choose to raise an exception because we have the infrastructure, it makes the generated code for gen_bx simpler, and it has the possibility of catching more guest bugs.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/syndrome.h      |  5 ++++
 target/arm/translate-a64.c | 12 +++++++++
 target/arm/translate.c     | 50 +++++++++++++++++++++++++++-----------
 3 files changed, 53 insertions(+), 14 deletions(-)

diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
index c590a109da..569b0c1115 100644
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -275,4 +275,9 @@ static inline uint32_t syn_illegalstate(void)
     return EC_ILLEGALSTATE << ARM_EL_EC_SHIFT;
 }
 
+static inline uint32_t syn_pcalignment(void)
+{
+    return EC_PCALIGNMENT << ARM_EL_EC_SHIFT;
+}
+
 #endif /* TARGET_ARM_SYNDROME_H */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 333bc836b2..c394bddac6 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14754,6 +14754,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     CPUARMState *env = cpu->env_ptr;
     uint32_t insn;
 
+    /* Singlestep exceptions have the highest priority. */
     if (s->ss_active && !s->pstate_ss) {
         /* Singlestep state is Active-pending.
          * If we're in this state at the start of a TB then either
@@ -14771,6 +14772,17 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         return;
     }
 
+    if (s->base.pc_next & 3) {
+        /*
+         * PC alignment fault.  This has priority over the instruction abort
+         * that we would receive from a translation fault via arm_ldl_code.
+         */
+        gen_exception_insn(s, s->base.pc_next, EXCP_UDEF,
+                           syn_pcalignment(), default_exception_el(s));
+        s->base.pc_next = QEMU_ALIGN_UP(s->base.pc_next, 4);
+        return;
+    }
+
     s->pc_curr = s->base.pc_next;
     insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
     s->insn = insn;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 5e0fc8a0a0..00ddd4879c 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9452,19 +9452,8 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
     dc->insn_start = tcg_last_op();
 }
 
-static bool arm_pre_translate_insn(DisasContext *dc)
+static bool arm_check_ss_active(DisasContext *dc)
 {
-#ifdef CONFIG_USER_ONLY
-    /* Intercept jump to the magic kernel page.  */
-    if (dc->base.pc_next >= 0xffff0000) {
-        /* We always get here via a jump, so know we are not in a
-           conditional execution block.  */
-        gen_exception_internal(EXCP_KERNEL_TRAP);
-        dc->base.is_jmp = DISAS_NORETURN;
-        return true;
-    }
-#endif
-
     if (dc->ss_active && !dc->pstate_ss) {
         /* Singlestep state is Active-pending.
          * If we're in this state at the start of a TB then either
@@ -9485,6 +9474,21 @@ static bool arm_pre_translate_insn(DisasContext *dc)
     return false;
 }
 
+static bool arm_check_kernelpage(DisasContext *dc)
+{
+#ifdef CONFIG_USER_ONLY
+    /* Intercept jump to the magic kernel page.  */
+    if (dc->base.pc_next >= 0xffff0000) {
+        /* We always get here via a jump, so know we are not in a
+           conditional execution block.  */
+        gen_exception_internal(EXCP_KERNEL_TRAP);
+        dc->base.is_jmp = DISAS_NORETURN;
+        return true;
+    }
+#endif
+    return false;
+}
+
 static void arm_post_translate_insn(DisasContext *dc)
 {
     if (dc->condjmp && !dc->base.is_jmp) {
@@ -9500,7 +9504,25 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     CPUARMState *env = cpu->env_ptr;
     unsigned int insn;
 
-    if (arm_pre_translate_insn(dc)) {
+    /* Singlestep exceptions have the highest priority. */
+    if (arm_check_ss_active(dc)) {
+        dc->base.pc_next += 4;
+        return;
+    }
+
+    if (dc->base.pc_next & 3) {
+        /*
+         * PC alignment fault.  This has priority over the instruction abort
+         * that we would receive from a translation fault via arm_ldl_code
+         * (or the execution of the kernelpage entrypoint).
+         */
+        gen_exception_insn(dc, dc->base.pc_next, EXCP_UDEF,
+                           syn_pcalignment(), default_exception_el(dc));
+        dc->base.pc_next = QEMU_ALIGN_UP(dc->base.pc_next, 4);
+        return;
+    }
+
+    if (arm_check_kernelpage(dc)) {
         dc->base.pc_next += 4;
         return;
     }
@@ -9570,7 +9592,7 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     uint32_t insn;
     bool is_16bit;
 
-    if (arm_pre_translate_insn(dc)) {
+    if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
         dc->base.pc_next += 2;
         return;
     }
-- 
2.25.1
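As a side note, not part of the patch: assuming EC_PCALIGNMENT in target/arm/syndrome.h carries the architectural EC value 0x22 and ARM_EL_EC_SHIFT is 26 (neither definition is visible in the hunks above), the new helper encodes the syndrome as sketched below.

    #include <assert.h>
    #include <stdint.h>

    /* Values assumed from target/arm/syndrome.h; not shown in the diff above. */
    #define EC_PCALIGNMENT   0x22u
    #define ARM_EL_EC_SHIFT  26

    static inline uint32_t syn_pcalignment(void)
    {
        return EC_PCALIGNMENT << ARM_EL_EC_SHIFT;
    }

    int main(void)
    {
        /* EC 0x22 lands in ESR[31:26], with an otherwise-zero ISS. */
        assert(syn_pcalignment() == 0x88000000u);
        return 0;
    }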