This is a minor improvement for the code we generate for the core_list_find routine in coremark.
Basically on riscv, and likely other targets, we can end up with an unconditional jump to a block that contains nothing but a return. This is the result of compensation code created by bb-reorder, and since no jump optimization pass runs after bb-reorder, nothing cleans it up.
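To illustrate the shape of the problem (this is paraphrased riscv output; the labels, registers and surrounding code are made up for illustration, not taken from an actual dump):

        ...
        j       .L10            # compensation jump created by bb-reorder
        ...
.L10:
        ret                     # target block contains nothing but a return

With this patch the jump is replaced by the return itself:

        ...
        ret
        ...

and the .L10 block can then be deleted if nothing else reaches it.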
This patch reuses preexisting code (bb_is_just_return) to identify suitable branch targets, as well as preexisting code to emit a suitable return, so it's pretty simple. Note that when we arrange to do this optimization, the original return block may become unreachable, so we conditionally call delete_unreachable_blocks to fix that up.
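In condensed form (extracted from the patch below, with the surrounding context in fixup_reorder_chain elided), the new logic is:

      /* If E_FALL->dest is just a return block, then we can emit a
         return rather than a jump to the return block.  */
      rtx_insn *ret, *use;
      basic_block dest;
      if (bb_is_just_return (e_fall->dest, &ret, &use)
          && (PATTERN (ret) == simple_return_rtx || PATTERN (ret) == ret_rtx))
        {
          /* Aim the edge at the exit block instead;
             force_nonfallthru_and_redirect then emits the return for us.
             The old return block may now be unreachable.  */
          ret_label = PATTERN (ret);
          dest = EXIT_BLOCK_PTR_FOR_FN (cfun);
          remove_unreachable_blocks = true;
        }
      else
        dest = e_fall->dest;

      nb = force_nonfallthru_and_redirect (e_fall, dest, ret_label);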
This triggers ~160 times during an x86_64 bootstrap. Naturally it bootstraps and regression tests on x86_64.
I've also bootstrapped this on riscv64. Regression testing with qemu shows some regressions, but AFAICT they're actually qemu bugs with signal handling/delivery -- qemu user mode emulation is not consistently calling user-defined signal handlers. Given the same binary, sometimes the handler gets called and the test passes; other times the handler isn't called and the test (of course) fails. I'll probably spend some time trying to chase this down for the sake of making testing easier.
OK for the trunk?

Jeff
commit f9a9119fa47f94348305a883fd88c23647fb1b07
Author: Jeff Law <jeffreya...@gmail.com>
Date:   Sun Sep 25 12:23:59 2022 -0400

    gcc/
        * cfgcleanup.cc (bb_is_just_return): No longer static.
        * cfgcleanup.h (bb_is_just_return): Add prototype.
        * cfgrtl.cc (fixup_reorder_chain): Do not create an unconditional
        jump to a return block.  Conditionally remove unreachable blocks.

    gcc/testsuite/gcc.target/riscv/
        * ret-1.c: New test.

diff --git a/gcc/cfgcleanup.cc b/gcc/cfgcleanup.cc
index a8b0139bb4d..a363e0b4da3 100644
--- a/gcc/cfgcleanup.cc
+++ b/gcc/cfgcleanup.cc
@@ -2599,7 +2599,7 @@ trivially_empty_bb_p (basic_block bb)
    return value.  Fill in *RET and *USE with the return and use insns
    if any found, otherwise NULL.  All CLOBBERs are ignored.  */
 
-static bool
+bool
 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
 {
   *ret = *use = NULL;
diff --git a/gcc/cfgcleanup.h b/gcc/cfgcleanup.h
index a6d882f98a4..f1021ca835f 100644
--- a/gcc/cfgcleanup.h
+++ b/gcc/cfgcleanup.h
@@ -30,5 +30,6 @@ extern int flow_find_head_matching_sequence (basic_block, basic_block,
 extern bool delete_unreachable_blocks (void);
 extern void delete_dead_jumptables (void);
 extern bool cleanup_cfg (int);
+extern bool bb_is_just_return (basic_block, rtx_insn **, rtx_insn **);
 
 #endif /* GCC_CFGCLEANUP_H */
diff --git a/gcc/cfgrtl.cc b/gcc/cfgrtl.cc
index a05c338a4c8..90cd6ee56a7 100644
--- a/gcc/cfgrtl.cc
+++ b/gcc/cfgrtl.cc
@@ -3901,6 +3901,7 @@ fixup_reorder_chain (void)
 
   /* Now add jumps and labels as needed to match the blocks new
      outgoing edges.  */
+  bool remove_unreachable_blocks = false;
   for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
       bb->aux)
     {
@@ -4043,10 +4044,30 @@
           continue;
         }
 
+      /* If E_FALL->dest is just a return block, then we can emit a
+         return rather than a jump to the return block.  */
+      rtx_insn *ret, *use;
+      basic_block dest;
+      if (bb_is_just_return (e_fall->dest, &ret, &use)
+          && (PATTERN (ret) == simple_return_rtx || PATTERN (ret) == ret_rtx))
+        {
+          ret_label = PATTERN (ret);
+          dest = EXIT_BLOCK_PTR_FOR_FN (cfun);
+
+          /* E_FALL->dest might become unreachable as a result of
+             replacing the jump with a return.  So arrange to remove
+             unreachable blocks.  */
+          remove_unreachable_blocks = true;
+        }
+      else
+        {
+          dest = e_fall->dest;
+        }
+
       /* We got here if we need to add a new jump insn.
         Note force_nonfallthru can delete E_FALL and thus we have to
         save E_FALL->src prior to the call to force_nonfallthru.  */
-      nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
+      nb = force_nonfallthru_and_redirect (e_fall, dest, ret_label);
       if (nb)
         {
           nb->aux = bb->aux;
@@ -4134,6 +4155,12 @@
               ei_next (&ei2);
             }
         }
+
+  /* Replacing a jump with a return may have exposed an unreachable
+     block.  Conditionally remove them if such transformations were
+     made.  */
+  if (remove_unreachable_blocks)
+    delete_unreachable_blocks ();
 }
 
 /* Perform sanity checks on the insn chain.
diff --git a/gcc/testsuite/gcc.target/riscv/ret-1.c b/gcc/testsuite/gcc.target/riscv/ret-1.c
new file mode 100644
index 00000000000..28133aa4226
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/ret-1.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -dp" } */
+/* This was extracted from coremark.  */
+
+
+typedef signed short ee_s16;
+typedef struct list_data_s
+{
+    ee_s16 data16;
+    ee_s16 idx;
+} list_data;
+
+typedef struct list_head_s
+{
+    struct list_head_s *next;
+    struct list_data_s *info;
+} list_head;
+
+
+list_head *
+core_list_find(list_head *list, list_data *info)
+{
+    if (info->idx >= 0)
+    {
+        while (list && (list->info->idx != info->idx))
+            list = list->next;
+        return list;
+    }
+    else
+    {
+        while (list && ((list->info->data16 & 0xff) != info->data16))
+            list = list->next;
+        return list;
+    }
+}
+
+/* There is only one legitimate unconditional jump, so test for that,
+   which will catch the case where bb-reorder leaves a jump to a ret
+   in the IL.  */
+/* { dg-final { scan-assembler-times "jump" 1 } } */