https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109771

            Bug ID: 109771
           Summary: Unnecessary pblendw for vectorized or
           Product: gcc
           Version: 13.1.0
            Status: UNCONFIRMED
          Severity: normal
          Priority: P3
         Component: rtl-optimization
          Assignee: unassigned at gcc dot gnu.org
          Reporter: chfast at gmail dot com
  Target Milestone: ---

I have an example of vectorization of a 4x64-bit struct (a representation of a
256-bit integer). The implementation just uses a for loop with a count of 4.

This is vectorized fine in isolation; however, when combined with some
non-trivial control flow and additional wrapping functions, the final assembly
contains weird pblendw instructions.

pblendw xmm1, xmm3, 240          (GCC 13, x86-64-v2)
movlpd  xmm1, QWORD PTR [rdi+16] (GCC 13, x86-64-v1)
shufpd  xmm1, xmm3, 2            (GCC 12)

I believe this is some kind of regression in GCC 13, because in a bigger
context GCC 12 was optimizing it "correctly". However, I lost that context
during test reduction.

https://godbolt.org/z/jzK44h3js

cpp:

struct u256 {
    unsigned long w[4];
};

inline u256 or_(u256 x, u256 y) {
    u256 z;
    for (int i = 0; i < 4; ++i) 
        z.w[i] = x.w[i] | y.w[i];
    return z;
}

inline void or_to(u256& z, u256 y) { z = or_(z, y); }

void op_or(u256* t) { or_to(t[1], t[0]); }

void test(u256* t) {
    void* tbl[]{&&CLOBBER, &&OR};
CLOBBER:
    goto * 0;
OR:
    op_or(t);
    goto * 0;
}
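
(The exact compiler invocations are in the godbolt link. Locally, something
along these lines should show the two variants; the flags below are my guess,
not copied from the link.)

g++-13 -O2 -march=x86-64-v2 -S test.cpp   # pblendw variant
g++-13 -O2 -march=x86-64    -S test.cpp   # baseline, movlpd variant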


x86-64-v2 asm:

test(u256*):
        xorl    %eax, %eax
        jmp     *%rax
        movdqu  32(%rdi), %xmm3
        movdqu  (%rdi), %xmm1
        movdqu  16(%rdi), %xmm2
        movdqu  48(%rdi), %xmm0
        por     %xmm3, %xmm1
        movups  %xmm1, 32(%rdi)
        movdqa  %xmm2, %xmm1
        pblendw $240, %xmm0, %xmm1
        pblendw $240, %xmm2, %xmm0
        por     %xmm1, %xmm0
        movups  %xmm0, 48(%rdi)
        jmp     *%rax
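
For reference, here is the upper-half sequence from the asm above expressed
with SSE4.1 intrinsics (a hand-written sketch, not compiler output; the
function names are made up). Since por is element-wise and OR is commutative,
the two cross-blends cancel out and the whole sequence reduces to a single
por, exactly like the lower half:

cpp (sketch):

#include <immintrin.h>

// x = t[0].w[2..3], y = t[1].w[2..3]

// What GCC 13 emits for the upper half, rewritten with intrinsics:
__m128i upper_half_gcc13(__m128i x, __m128i y) {
    __m128i a = _mm_blend_epi16(x, y, 0xF0); // {x.w[2], y.w[3]}
    __m128i b = _mm_blend_epi16(y, x, 0xF0); // {y.w[2], x.w[3]}
    return _mm_or_si128(a, b);               // {x.w[2]|y.w[2], x.w[3]|y.w[3]}
}

// What one would expect, matching the lower-half codegen:
__m128i upper_half_expected(__m128i x, __m128i y) {
    return _mm_or_si128(x, y);
}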
