Hi,

We transform

  (ptr_extend:DI (plus:SI (FOO:SI) (const_int Y)))

to

  (plus:DI (ptr_extend:DI (FOO:SI)) (const_int Y))

since this is how Pmode != ptr_mode is supported, even if the resulting
address may overflow/underflow.  The same is true for x32, which uses
zero_extend instead of ptr_extend.  I have tried different approaches to
avoid transforming

  (zero_extend:DI (plus:SI (FOO:SI) (const_int Y)))

to

  (plus:DI (zero_extend:DI (FOO:SI)) (const_int Y))

without success.  This patch relaxes the condition to check
POINTERS_EXTEND_UNSIGNED != 0 instead of POINTERS_EXTEND_UNSIGNED < 0, so
that both ptr_extend and zero_extend are covered.  We can investigate a
better approach for ptr_extend and zero_extend later.  For now, I believe
this is the safest way to support ptr_extend and zero_extend.  A small
stand-alone sketch after the patch illustrates why the two forms can give
different addresses once the SImode addition wraps.

Any comments?

Thanks.

H.J.
---
gcc/

2011-08-06  H.J. Lu  <hongjiu...@intel.com>

        PR middle-end/49721
        * explow.c (convert_memory_address_addr_space): Also permute the
        conversion and addition of constant for zero-extend.

gcc/testsuite/

2011-08-06  H.J. Lu  <hongjiu...@intel.com>

        PR middle-end/49721
        * gfortran.dg/pr49721.f: New.

diff --git a/gcc/explow.c b/gcc/explow.c
index f8262db..ed2f621 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -384,18 +384,23 @@ convert_memory_address_addr_space (enum machine_mode to_mode ATTRIBUTE_UNUSED,
 
     case PLUS:
     case MULT:
-      /* For addition we can safely permute the conversion and addition
-	 operation if one operand is a constant and converting the constant
-	 does not change it or if one operand is a constant and we are
-	 using a ptr_extend instruction (POINTERS_EXTEND_UNSIGNED < 0).
+      /* FIXME: For addition, we used to permute the conversion and
+	 addition operation only if one operand is a constant and
+	 converting the constant does not change it or if one operand
+	 is a constant and we are using a ptr_extend instruction
+	 (POINTERS_EXTEND_UNSIGNED < 0), even if the resulting address
+	 may overflow/underflow.  We relax the condition to include
+	 zero-extend (POINTERS_EXTEND_UNSIGNED > 0) since the other
+	 parts of the compiler depend on it.  See PR 49721.
+
 	 We can always safely permute them if we are making the address
 	 narrower.  */
       if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode)
 	  || (GET_CODE (x) == PLUS
 	      && CONST_INT_P (XEXP (x, 1))
-	      && (XEXP (x, 1) == convert_memory_address_addr_space
-				   (to_mode, XEXP (x, 1), as)
-		  || POINTERS_EXTEND_UNSIGNED < 0)))
+	      && (POINTERS_EXTEND_UNSIGNED != 0
+		  || XEXP (x, 1) == convert_memory_address_addr_space
+				      (to_mode, XEXP (x, 1), as))))
 	return gen_rtx_fmt_ee (GET_CODE (x), to_mode,
 			       convert_memory_address_addr_space
 				 (to_mode, XEXP (x, 0), as),
diff --git a/gcc/testsuite/gfortran.dg/pr49721.f b/gcc/testsuite/gfortran.dg/pr49721.f
new file mode 100644
index 0000000..39e2ed7
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr49721.f
@@ -0,0 +1,35 @@
+! PR middle-end/49721
+! { dg-do compile }
+! { dg-options "-O3 -funroll-loops" }
+
+      subroutine midbloc6(c,a2,a2i,q)
+      parameter (ndim2=6)
+      parameter (ndim=3)
+      dimension ri(ndim2),cr(ndim2,ndim2),xj(ndim2,ndim2),q(*)
+     @,sai(ndim2,ndim2),cm(ndim2,ndim2),w(ndim2,ndim2)
+      dimension vr(ndim2,ndim2),vi(ndim2,ndim2),s1(ndim2,ndim2),p(ndim)
+      dimension xq(6),qb(2),qc(2),ifl(6),iplane(3)
+      save
+      call eig66(cr,rr,ri,vr,vi)
+      xq(i)=asin(ri(i))/x2pi
+      i9=6
+      qb(1)=q(1)/x2pi
+      do 180 i=1,2
+        do 170 j=1,6
+ 120      if(xq(j)) 130,190,140
+ 130      if(qb(i)-0.5d0) 160,150,150
+ 140      if(qb(i)-0.5d0) 150,150,160
+ 150      continue
+          tst=abs(abs(qb(i))-abs(xq(j)))
+ 160    continue
+ 170    continue
+        iplane(i)=k
+ 180  continue
+ 190  continue
+      n1=iplane(3)
+      if(i9.eq.6) then
+        z=vr(1,n1)*vi(2,n1)-vr(2,n1)*vi(1,n1)+vr(3,n1)*vi(4,n1)-vr(4,n1)
+      endif
+      sai(6,i)=vi(i,n1)/z
+      call dacond6(a2,zero)
+      end
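
For reference, here is a minimal stand-alone sketch (not part of the patch)
modelling the x32 situation, where Pmode is DImode, ptr_mode is SImode and
pointers are zero-extended.  The helper name zext32 and the constants are
made up for illustration; the point is only that
(zero_extend:DI (plus:SI ...)) and (plus:DI (zero_extend:DI ...) ...) can
yield different addresses once the SImode addition wraps, which is why the
permutation is a convention the rest of the compiler relies on rather than
a value-preserving identity.

    /* zext32 models (zero_extend:DI (reg:SI)); names and values are only
       illustrative.  */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    zext32 (uint32_t x)
    {
      return (uint64_t) x;
    }

    int
    main (void)
    {
      uint32_t base = 0xfffffff0u;	/* SImode base "address".  */
      uint32_t off = 0x20;		/* const_int offset.  */

      /* (zero_extend:DI (plus:SI base off)): the addition wraps in SImode
         before the extension.  */
      uint64_t ext_of_sum = zext32 (base + off);

      /* (plus:DI (zero_extend:DI base) off): the addition is done in DImode
         after the extension.  */
      uint64_t sum_of_ext = zext32 (base) + (uint64_t) off;

      printf ("0x%llx vs 0x%llx\n",
              (unsigned long long) ext_of_sum,
              (unsigned long long) sum_of_ext);
      return ext_of_sum == sum_of_ext;
    }

With these values the two results differ (0x10 vs 0x100000010), which is
the overflow/underflow case the new comment in explow.c alludes to.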