http://gcc.gnu.org/bugzilla/show_bug.cgi?id=57819

            Bug ID: 57819
           Summary: Suboptimal shift patterns
           Product: gcc
           Version: 4.8.0
            Status: UNCONFIRMED
          Keywords: missed-optimization
          Severity: normal
          Priority: P3
         Component: target
          Assignee: unassigned at gcc dot gnu.org
          Reporter: ktietz at gcc dot gnu.org
            Target: x86_64-*-* i686-*-*

The following sample code demonstrates that the bt* optimization patterns
in the i386 machine description are weak.  The patterns only handle the
case where the left-hand-side mode and the right-hand-side mode are equal
(DI/SI).  Since the right-hand operand of a shift can be 8, 16, 32, etc.
bits wide, a simple cast pessimizes the optimization.

The sample here is for 64-bit.

/* { dg-do compile } */
/* { dg-options "-O2 -mtune=core2" } */

void foo (void);

__extension__ typedef __INTPTR_TYPE__ intptr_t;

int test (intptr_t x, intptr_t n)
{
  n &= (sizeof (intptr_t) * 8) - 1;

  if (x & ((intptr_t)0x01 << n))
    foo ();

  return 0;
}

int test2 (intptr_t x, intptr_t n)
{
  if (x & ((intptr_t)0x01 << ((int) n & ((sizeof (intptr_t) * 8) - 1))))
    foo ();

  return 0;
}

/* { dg-final { scan-assembler-not "and\[lq\]\[ \t\]" } } */

It produces the following assembly output, with test compiled to the
properly optimized bt variant:

test:
    subq    $40, %rsp
    .seh_stackalloc    40
    .seh_endprologue
    btq    %rdx, %rcx
    jnc    .L2
    call    foo
.L2:
    xorl    %eax, %eax
    addq    $40, %rsp
    ret

while test2, due to the intermediate (int) cast, misses the bt pattern:

test2:
    subq    $40, %rsp
    movq    %rcx, %rax
    movl    %edx, %ecx
    sarq    %cl, %rax
    testb    $1, %al
    je    .L8
    call    foo
.L8:
    xorl    %eax, %eax
    addq    $40, %rsp
    ret
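
For comparison, a sketch of the output one would expect for test2 if the
pattern also matched the narrower shift count.  Since bt with a register
operand already reduces the bit offset modulo the operand width, both the
mask and the cast are redundant; the label name and the trimmed SEH
prologue below are assumed to mirror test:

test2:
    subq    $40, %rsp
    .seh_stackalloc    40
    .seh_endprologue
    btq    %rdx, %rcx    # bit offset taken modulo 64 by the hardware
    jnc    .L8
    call    foo
.L8:
    xorl    %eax, %eax
    addq    $40, %rsp
    ret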
