Hi, this patch adds more fine-grained alignment checks to SH's builtin strcmp and strncmp expanders. If one of the input pointers is known to be at least 4-byte aligned, there's no need to check it.
Tested on sh-elf with make -k check RUNTESTFLAGS="--target_board=sh-sim \{-m2/-ml,-m2/-mb,-m2a/-mb,-m4/-ml,-m4/-mb,-m4a/-ml,-m4a/-mb}" Committed to trunk as r228118. Cheers, Oleg gcc/ChangeLog: PR target/67675 * config/sh/sh-mem.cc (sh_expand_cmpstr): Check alignment of addr1 and addr2 individually. Don't emit logical or insn if one is known to be aligned appropriately. (sh_expand_cmpnstr): Likewise. gcc/testsuite/ChangeLog: PR target/67675 * gcc.target/sh/pr67675.c: New.
Index: gcc/config/sh/sh-mem.cc =================================================================== --- gcc/config/sh/sh-mem.cc (revision 228117) +++ gcc/config/sh/sh-mem.cc (working copy) @@ -224,11 +224,10 @@ rtx_code_label *L_loop_long = gen_label_rtx (); rtx_code_label *L_end_loop_long = gen_label_rtx (); - int align = INTVAL (operands[3]); + const unsigned int addr1_alignment = MEM_ALIGN (operands[1]) / BITS_PER_UNIT; + const unsigned int addr2_alignment = MEM_ALIGN (operands[2]) / BITS_PER_UNIT; - emit_move_insn (tmp0, const0_rtx); - - if (align < 4) + if (addr1_alignment < 4 && addr2_alignment < 4) { emit_insn (gen_iorsi3 (tmp1, s1_addr, s2_addr)); emit_insn (gen_tstsi_t (tmp1, GEN_INT (3))); @@ -235,6 +234,18 @@ jump = emit_jump_insn (gen_branch_false (L_loop_byte)); add_int_reg_note (jump, REG_BR_PROB, prob_likely); } + else if (addr1_alignment < 4 && addr2_alignment >= 4) + { + emit_insn (gen_tstsi_t (s1_addr, GEN_INT (3))); + jump = emit_jump_insn (gen_branch_false (L_loop_byte)); + add_int_reg_note (jump, REG_BR_PROB, prob_likely); + } + else if (addr1_alignment >= 4 && addr2_alignment < 4) + { + emit_insn (gen_tstsi_t (s2_addr, GEN_INT (3))); + jump = emit_jump_insn (gen_branch_false (L_loop_byte)); + add_int_reg_note (jump, REG_BR_PROB, prob_likely); + } addr1 = adjust_automodify_address (addr1, SImode, s1_addr, 0); addr2 = adjust_automodify_address (addr2, SImode, s2_addr, 0); @@ -352,6 +363,9 @@ rtx len = force_reg (SImode, operands[3]); int constp = CONST_INT_P (operands[3]); + const unsigned int addr1_alignment = MEM_ALIGN (operands[1]) / BITS_PER_UNIT; + const unsigned int addr2_alignment = MEM_ALIGN (operands[2]) / BITS_PER_UNIT; + /* Loop on a register count. 
*/ if (constp) { @@ -362,7 +376,6 @@ rtx_code_label *L_loop_long = gen_label_rtx (); rtx_code_label *L_end_loop_long = gen_label_rtx (); - int align = INTVAL (operands[4]); int bytes = INTVAL (operands[3]); int witers = bytes / 4; @@ -373,7 +386,7 @@ emit_move_insn (tmp0, const0_rtx); - if (align < 4) + if (addr1_alignment < 4 && addr2_alignment < 4) { emit_insn (gen_iorsi3 (tmp1, s1_addr, s2_addr)); emit_insn (gen_tstsi_t (tmp1, GEN_INT (3))); @@ -380,6 +393,18 @@ jump = emit_jump_insn (gen_branch_false (L_loop_byte)); add_int_reg_note (jump, REG_BR_PROB, prob_likely); } + else if (addr1_alignment < 4 && addr2_alignment >= 4) + { + emit_insn (gen_tstsi_t (s1_addr, GEN_INT (3))); + jump = emit_jump_insn (gen_branch_false (L_loop_byte)); + add_int_reg_note (jump, REG_BR_PROB, prob_likely); + } + else if (addr1_alignment >= 4 && addr2_alignment < 4) + { + emit_insn (gen_tstsi_t (s2_addr, GEN_INT (3))); + jump = emit_jump_insn (gen_branch_false (L_loop_byte)); + add_int_reg_note (jump, REG_BR_PROB, prob_likely); + } /* word count. Do we have iterations ? */ emit_insn (gen_lshrsi3 (lenw, len, GEN_INT (2))); Index: gcc/testsuite/gcc.target/sh/pr67675.c =================================================================== --- gcc/testsuite/gcc.target/sh/pr67675.c (revision 0) +++ gcc/testsuite/gcc.target/sh/pr67675.c (working copy) @@ -0,0 +1,62 @@ +/* Check that run time alignment tests are generated only for inputs of + unknown alignment. 
*/ +/* { dg-do compile } */ +/* { dg-options "-O2" } */ +/* { dg-final { scan-assembler-not "jmp|bsr|jsr" } } */ +/* { dg-final { scan-assembler-times {tst #3,r0} 6 } } */ +/* { dg-final { scan-assembler-times {or r[0-9],r[0-9]} 1 } } */ + +int +test_00 (const char* x, const char* y) +{ + /* 1x or reg,reg, 1x tst #3,r0 */ + return __builtin_strcmp (x, y); +} + +int +test_01 (const char* x, const char* y) +{ + /* 1x tst #3,r0 */ + return __builtin_strcmp (__builtin_assume_aligned (x, 4), y); +} + +int +test_02 (const char* x, const char* y) +{ + /* 1x tst #3,r0 */ + return __builtin_strcmp (x, __builtin_assume_aligned (y, 4)); +} + +int +test_03 (const char* x, const char* y) +{ + return __builtin_strcmp (__builtin_assume_aligned (x, 4), + __builtin_assume_aligned (y, 4)); +} + +int +test_04 (const char* x, const char* y) +{ + /* 1x tst #3,r0 */ + return __builtin_strcmp (x, "1234567"); +} + +int +test_05 (const char* x, const char* y) +{ + /* 1x tst #3,r0 */ + return __builtin_strcmp ("1234567", y); +} + +int +test_06 (const char* s1) +{ + /* 1x tst #3,r0 */ + return __builtin_strncmp (s1, "abcdabcd", 8); +} + +int +test_07 (const char* s1) +{ + return __builtin_strncmp (__builtin_assume_aligned (s1, 4), "abcdabcd", 8); +}