https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101929
Bug ID: 101929
Summary: r12-2549 regresses x264_r by 4% on CLX.
Product: gcc
Version: 12.0
Status: UNCONFIRMED
Severity: normal
Priority: P3
Component: tree-optimization
Assignee: unassigned at gcc dot gnu.org
Reporter: crazylht at gmail dot com
CC: hjl.tools at gmail dot com, wwwhhhyyy333 at gmail dot com
Target Milestone: ---
Host: x86_64-pc-linux-gnu
Target: x86_64-*-* i?86-*-*
The regression is in x264_pixel_satd_8x4
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned short uint16_t;
// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
// Branchless per-halfword absolute value: s holds 0xffff in each 16-bit
// lane whose sign bit (bit 15 / bit 31 of a) is set, 0 otherwise, so
// (a+s)^s performs a two's-complement-style conditional negation of the
// negative lanes.  NOTE(review): carries can cross the 16-bit lane
// boundary; this is the standard x264 idiom and is correct for the value
// ranges produced by HADAMARD4 below -- do not "simplify".
static inline
uint32_t abs2( uint32_t a )
{
uint32_t s = ((a>>15)&0x10001)*0xffff;
return (a+s)^s;
}
/* 4-point Hadamard butterfly: computes the four transform outputs
 * d0..d3 from inputs s0..s3 via two stages of sums/differences.
 * Note s0..s3 are each evaluated twice, so arguments must be free of
 * side effects.  The exact statement shape here is what the SLP
 * vectorizer dumps quoted below correspond to -- keep it as-is. */
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
int t0 = s0 + s1;\
int t1 = s0 - s1;\
int t2 = s2 + s3;\
int t3 = s2 - s3;\
d0 = t0 + t2;\
d2 = t0 - t2;\
d1 = t1 + t3;\
d3 = t1 - t3;\
}
/* Sum of absolute transformed differences (SATD) of an 8x4 block:
 * Hadamard-transforms the pixel differences between pix1 and pix2
 * (strides i_pix1 / i_pix2) and sums the absolute coefficients.
 * Each uint32_t packs two 16-bit lanes (pseudo-simd, see abs2). */
int
x264_pixel_satd_8x4( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
uint32_t tmp[4][4];
uint32_t a0, a1, a2, a3;
int sum = 0;
/* Horizontal pass: pack columns j and j+4 of each row's difference
 * into one word, then 4-point Hadamard across the row.
 * NOTE(review): (pix1[4]-pix2[4]) can be negative, and left-shifting a
 * negative int is formally UB in ISO C; GCC defines this behavior and
 * the code matches upstream x264, so it is kept verbatim here. */
for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
{
a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
}
/* Vertical pass: Hadamard down each column of tmp, then accumulate the
 * per-lane absolute values (both 16-bit halves at once via abs2). */
for( int i = 0; i < 4; i++ )
{
HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]
);
sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
}
/* Fold the two 16-bit lane sums together; >>1 compensates for the
 * unnormalized Hadamard transform. */
return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}
After r12-2549 increased the cost of vector CTORs, slp1 no longer vectorizes the code below:
git diff my.slp1 original.slp1
- _820 = {_187, _189, _187, _189};
- vect_t2_188.65_821 = VIEW_CONVERT_EXPR<vector(4) int>(_820);
- vect__200.67_823 = vect_t0_184.64_819 - vect_t2_188.65_821;
- vect__191.66_822 = vect_t0_184.64_819 + vect_t2_188.65_821;
- _824 = VEC_PERM_EXPR <vect__191.66_822, vect__200.67_823, { 0, 1, 6, 7 }>;
- vect__192.68_825 = VIEW_CONVERT_EXPR<vector(4) unsigned int>(_824);
t3_190 = (int) _189;
_191 = t0_184 + t2_188;
_192 = (unsigned int) _191;
+ tmp[0][0] = _192;
_194 = t0_184 - t2_188;
_195 = (unsigned int) _194;
+ tmp[0][2] = _195;
_197 = t1_186 + t3_190;
_198 = (unsigned int) _197;
+ tmp[0][1] = _198;
_200 = t1_186 - t3_190;
_201 = (unsigned int) _200;
- MEM <vector(4) unsigned int> [(unsigned int *)&tmp] = vect__192.68_825;
+ tmp[0][3] = _201;
but the vectorized version happens to enable FRE to eliminate the redundant vector
loads, and thus gives even better performance.
git diff dump.veclower21 dump.fre5
MEM <vector(4) unsigned int> [(unsigned int *)&tmp + 48B] = vect__54.89_852;
- vect__63.9_482 = MEM <vector(4) unsigned int> [(unsigned int *)&tmp];
- vect__64.12_478 = MEM <vector(4) unsigned int> [(unsigned int *)&tmp + 16B];
- vect__65.13_477 = vect__63.9_482 + vect__64.12_478;
+ vect__65.13_477 = vect__192.68_825 + vect__273.75_834;
vect_t0_100.14_476 = VIEW_CONVERT_EXPR<vector(4) int>(vect__65.13_477);
- vect__67.15_475 = vect__63.9_482 - vect__64.12_478;
+ vect__67.15_475 = vect__192.68_825 - vect__273.75_834;
vect_t1_101.16_474 = VIEW_CONVERT_EXPR<vector(4) int>(vect__67.15_475);
- vect__68.19_470 = MEM <vector(4) unsigned int> [(unsigned int *)&tmp + 32B];
- vect__69.22_466 = MEM <vector(4) unsigned int> [(unsigned int *)&tmp + 48B];
- vect__70.23_465 = vect__68.19_470 + vect__69.22_466;
+ vect__70.23_465 = vect__354.82_843 + vect__54.89_852;
If slp1 could recognize this and account for the savings shown above when comparing
scalar cost vs. vector cost, GCC would perform the vectorization, but currently it doesn't.