Committed.

Richard.
2014-11-13  Richard Biener  <rguent...@suse.de>

	* match.pd: Remove redundant pattern.  Inline remaining stuff
	from match-rotate.pd and match-conversions.pd.
	* match-rotate.pd: Remove.
	* match-conversions.pd: Likewise.

Index: gcc/match.pd
===================================================================
--- gcc/match.pd	(revision 217502)
+++ gcc/match.pd	(working copy)
@@ -779,14 +779,6 @@ (define_operator_list inverted_tcc_compa
   (if (INTEGRAL_TYPE_P (type))
    (plus @0 { build_int_cst (TREE_TYPE (@0), 1); } )))
 
-/* One ternary pattern.  */
-
-/* Due to COND_EXPRs weirdness in GIMPLE the following won't work
-   without some hacks in the code generator.  */
-(simplify
-  (cond (bit_not @0) @1 @2)
-  (cond @0 @2 @1))
-
 /* match-and-simplify handles constant folding so we can just do the
    decomposition here.  */
 (simplify
@@ -799,12 +791,76 @@ (define_operator_list inverted_tcc_compa
   (abs @0))
 
 
+/* (x << CNT1) OP (x >> CNT2) -> x r<< CNT1  OP being +, |, ^  */
+(for op (plus bit_ior bit_xor)
+(simplify
+  (op:c (lshift @0 INTEGER_CST@1) (rshift @0 INTEGER_CST@2))
+  (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
+       && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
+       && tree_fits_uhwi_p (@1) && tree_fits_uhwi_p (@2)
+       && wi::eq_p (TYPE_PRECISION (type), wi::add (@1, @2)))
+   (lrotate @0 @1))))
+
+
+/* From fold_unary in order of appearance.  */
+
+/* If we have (type) (a CMP b) and type is an integral type, return
+   new expression involving the new type.  Canonicalize
+   (type) (a CMP b) to (a CMP b) ? (type) true : (type) false for
+   non-integral type.
+   Do not fold the result as that would not simplify further, also
+   folding again results in recursions.  */
+/* ???  Eh, do we want sth like (define-ops cmp lt le eq ...) to not
+   repeat this too many times?  */
+(for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
+ (simplify
+  (convert (cmp@2 @0 @1))
+  (if (TREE_CODE (type) == BOOLEAN_TYPE)
+   (cmp @0 @1))
+  /* Not sure if the following makes sense for GIMPLE.  */
+  (if (!INTEGRAL_TYPE_P (type) && !VOID_TYPE_P (type)
+       && TREE_CODE (type) != VECTOR_TYPE)
+   (cond @2
+    { constant_boolean_node (true, type); }
+    { constant_boolean_node (false, type); }))))
+
+
+/* Convert (T1)(~(T2)X) into ~(T1)X if T1 and T2 are integral types
+   of the same precision, and X is an integer type not narrower than
+   types T1 or T2, i.e. the cast (T2)X isn't an extension.  */
+(simplify
+  (convert (bit_not@0 (convert @1)))
+  (if (INTEGRAL_TYPE_P (type)
+       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
+       && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
+       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
+       && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)))
+   (bit_not (convert @1))))
+
+/* Convert (T1)(X * Y) into (T1)X * (T1)Y if T1 is narrower than the
+   type of X and Y (integer types only).  */
+(simplify
+  (convert (mult @0 @1))
+  (if (INTEGRAL_TYPE_P (type)
+       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
+       && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@0))
+       /* ???  These kind of patterns are a bad idea - see PR41043.  We
+          create a lot of redundant statements if operands are used multiple
+          times.  Maybe we want a flag for this.  But eventually these
+          kind of transforms should be done in a pass.  */
+       && (GENERIC
+           || TREE_CODE (@0) != SSA_NAME || TREE_CODE (@1) != SSA_NAME
+           || ((TREE_CODE (@0) != SSA_NAME || has_single_use (@0))
+               && (TREE_CODE (@1) != SSA_NAME || has_single_use (@1)))))
+   (if (TYPE_OVERFLOW_WRAPS (type))
+    (mult (convert @0) (convert @1)))
+   (with { tree utype = unsigned_type_for (type); }
+    (convert (mult (convert:utype @0) (convert:utype @1))))))
+
 #include "match-bitwise.pd"
-#include "match-rotate.pd"
 #include "match-builtin.pd"
 #include "match-comparison.pd"
-#include "match-conversions.pd"
 
 /* ????s
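
For readers less familiar with the rotate idiom the inlined (lrotate ...) pattern
targets, here is a minimal C sketch of the kind of source it applies to; the
function name and the fixed shift count of 3 are purely illustrative and not
taken from the patch or any testcase.

#include <stdint.h>

/* Illustrative only: both shift counts are INTEGER_CSTs and their sum
   equals the precision of the unsigned type (3 + 29 == 32), which is the
   condition checked above, so the IOR of the two shifts can be folded
   into a single rotate left by 3.  */
uint32_t
rotl3 (uint32_t x)
{
  return (x << 3) | (x >> 29);
}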