This patch adds a partial_integral_type_p function, to go along
with the full_integral_type_p added by the previous patch.
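
For reference, partial_integral_type_p (added in the tree.h hunk below)
is true for integral types whose TYPE_PRECISION is smaller than the
precision of their underlying mode.  A representative sketch of the
kind of check it replaces (the actual hunks vary slightly in form):

    /* Before:  */
    if (INTEGRAL_TYPE_P (type)
        && TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type)))
      ...

    /* After:  */
    if (partial_integral_type_p (type))
      ...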

Of the changes that didn't previously have an INTEGRAL_TYPE_P check:

- the convert_to_integer_1 hunks are dominated by a case-statement
  version of the INTEGRAL_TYPE_P check.

- the merge_ranges hunk is dominated by an ENUMERAL_TYPE case.

- vectorizable_reduction has the comment:

      /* Do not try to vectorize bit-precision reductions.  */

  and so I think it was only concerned with integers.

- vectorizable_assignment has the comment:

      /* We do not handle bit-precision changes.  */

  and the later check:

      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
            > TYPE_PRECISION (TREE_TYPE (op)))
           && TYPE_UNSIGNED (TREE_TYPE (op)))

  would only make sense if OP is also an integral type.

- vectorizable_shift is inherently restricted to integers.
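
To make "partial" concrete, here is an illustration that is not part
of the patch: a typical source of such types is a bit-field.  The
field type below has TYPE_PRECISION 3 but is laid out in a wider mode
(QImode on typical targets, precision 8), so partial_integral_type_p
is true for it, whereas a plain unsigned char would satisfy
full_integral_type_p:

    /* Hypothetical illustration only.  */
    struct packet { unsigned int count : 3; };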

Tested on aarch64-linux-gnu and x86_64-linux-gnu.  OK to install?

Richard


2017-08-17  Richard Sandiford  <richard.sandif...@linaro.org>
            Alan Hayward  <alan.hayw...@arm.com>
            David Sherwood  <david.sherw...@arm.com>

gcc/
        * tree.h (partial_integral_type_p): New function.
        * convert.c (convert_to_integer_1): Use it.
        * expr.c (store_field, expand_expr_real_2, expand_expr_real_1):
        Likewise.
        * fold-const.c (merge_ranges): Likewise.
        * tree-ssa-math-opts.c (convert_mult_to_fma): Likewise.
        * tree-tailcall.c (process_assignment): Likewise.
        * tree-vect-loop.c (vectorizable_reduction): Likewise.
        * tree-vect-stmts.c (vectorizable_conversion): Likewise.
        (vectorizable_assignment, vectorizable_shift): Likewise.

Index: gcc/tree.h
===================================================================
--- gcc/tree.h  2017-08-18 08:35:58.031690315 +0100
+++ gcc/tree.h  2017-08-18 08:36:07.208306339 +0100
@@ -5414,4 +5414,13 @@ full_integral_type_p (const_tree t)
   return INTEGRAL_TYPE_P (t) && scalar_type_is_full_p (t);
 }
 
+/* Return true if T is an integral type that has fewer bits than
+   its underlying mode.  */
+
+inline bool
+partial_integral_type_p (const_tree t)
+{
+  return INTEGRAL_TYPE_P (t) && !scalar_type_is_full_p (t);
+}
+
 #endif  /* GCC_TREE_H  */
Index: gcc/convert.c
===================================================================
--- gcc/convert.c       2017-08-10 14:36:09.015436664 +0100
+++ gcc/convert.c       2017-08-18 08:36:07.203306339 +0100
@@ -711,8 +711,7 @@ convert_to_integer_1 (tree type, tree ex
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
-             && (TYPE_PRECISION (TREE_TYPE (expr))
-                 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
+             && partial_integral_type_p (TREE_TYPE (expr)))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;
@@ -725,7 +724,7 @@ convert_to_integer_1 (tree type, tree ex
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
       else if (TREE_CODE (type) == ENUMERAL_TYPE
-              || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
+              || partial_integral_type_p (type))
        {
          expr = convert (lang_hooks.types.type_for_mode
                          (TYPE_MODE (type), TYPE_UNSIGNED (type)), expr);
Index: gcc/expr.c
===================================================================
--- gcc/expr.c  2017-08-03 10:40:54.807600276 +0100
+++ gcc/expr.c  2017-08-18 08:36:07.204306339 +0100
@@ -6834,8 +6834,7 @@ store_field (rtx target, HOST_WIDE_INT b
       if (nop_def)
        {
          tree type = TREE_TYPE (exp);
-         if (INTEGRAL_TYPE_P (type)
-             && TYPE_PRECISION (type) < GET_MODE_BITSIZE (TYPE_MODE (type))
+         if (partial_integral_type_p (type)
              && bitsize == TYPE_PRECISION (type))
            {
              tree op = gimple_assign_rhs1 (nop_def);
@@ -8243,8 +8242,7 @@ #define REDUCE_BIT_FIELD(expr)    (reduce_b
   /* An operation in what may be a bit-field type needs the
      result to be reduced to the precision of the bit-field type,
      which is narrower than that of the type's mode.  */
-  reduce_bit_field = (INTEGRAL_TYPE_P (type)
-                     && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+  reduce_bit_field = partial_integral_type_p (type);
 
   if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
     target = 0;
@@ -9669,9 +9667,7 @@ expand_expr_real_1 (tree exp, rtx target
   /* An operation in what may be a bit-field type needs the
      result to be reduced to the precision of the bit-field type,
      which is narrower than that of the type's mode.  */
-  reduce_bit_field = (!ignore
-                     && INTEGRAL_TYPE_P (type)
-                     && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+  reduce_bit_field = (!ignore && partial_integral_type_p (type));
 
   /* If we are going to ignore this result, we need only do something
      if there is a side-effect somewhere in the expression.  If there
Index: gcc/fold-const.c
===================================================================
--- gcc/fold-const.c    2017-08-18 08:35:58.028990315 +0100
+++ gcc/fold-const.c    2017-08-18 08:36:07.206306339 +0100
@@ -5063,8 +5063,7 @@ merge_ranges (int *pin_p, tree *plow, tr
                switch (TREE_CODE (TREE_TYPE (low0)))
                  {
                  case ENUMERAL_TYPE:
-                   if (TYPE_PRECISION (TREE_TYPE (low0))
-                       != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (low0))))
+                   if (partial_integral_type_p (TREE_TYPE (low0)))
                      break;
                    /* FALLTHROUGH */
                  case INTEGER_TYPE:
@@ -5086,8 +5085,7 @@ merge_ranges (int *pin_p, tree *plow, tr
                switch (TREE_CODE (TREE_TYPE (high1)))
                  {
                  case ENUMERAL_TYPE:
-                   if (TYPE_PRECISION (TREE_TYPE (high1))
-                       != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (high1))))
+                   if (partial_integral_type_p (TREE_TYPE (high1)))
                      break;
                    /* FALLTHROUGH */
                  case INTEGER_TYPE:
Index: gcc/tree-ssa-math-opts.c
===================================================================
--- gcc/tree-ssa-math-opts.c    2017-08-10 14:36:07.848478816 +0100
+++ gcc/tree-ssa-math-opts.c    2017-08-18 08:36:07.206306339 +0100
@@ -3563,9 +3563,7 @@ convert_mult_to_fma (gimple *mul_stmt, t
     return false;
 
   /* We don't want to do bitfield reduction ops.  */
-  if (INTEGRAL_TYPE_P (type)
-      && (TYPE_PRECISION (type)
-         != GET_MODE_PRECISION (TYPE_MODE (type))))
+  if (partial_integral_type_p (type))
     return false;
 
   /* If the target doesn't support it, don't generate it.  We assume that
Index: gcc/tree-tailcall.c
===================================================================
--- gcc/tree-tailcall.c 2017-06-30 12:50:37.493697233 +0100
+++ gcc/tree-tailcall.c 2017-08-18 08:36:07.206306339 +0100
@@ -288,9 +288,7 @@ process_assignment (gassign *stmt,
          /* Even if the type modes are the same, if the precision of the
             type is smaller than mode's precision,
             reduce_to_bit_field_precision would generate additional code.  */
-         if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
-             && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
-                 > TYPE_PRECISION (TREE_TYPE (dest))))
+         if (partial_integral_type_p (TREE_TYPE (dest)))
            return FAIL;
        }
 
Index: gcc/tree-vect-loop.c
===================================================================
--- gcc/tree-vect-loop.c        2017-08-16 08:50:54.198550019 +0100
+++ gcc/tree-vect-loop.c        2017-08-18 08:36:07.207306339 +0100
@@ -5848,8 +5848,7 @@ vectorizable_reduction (gimple *stmt, gi
     return false;
 
   /* Do not try to vectorize bit-precision reductions.  */
-  if ((TYPE_PRECISION (scalar_type)
-       != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
+  if (partial_integral_type_p (scalar_type))
     return false;
 
   /* All uses but the last are expected to be defined in the loop.
Index: gcc/tree-vect-stmts.c
===================================================================
--- gcc/tree-vect-stmts.c       2017-08-17 09:05:42.987968487 +0100
+++ gcc/tree-vect-stmts.c       2017-08-18 08:36:07.208306339 +0100
@@ -4097,12 +4097,8 @@ vectorizable_conversion (gimple *stmt, g
     return false;
 
   if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
-      && ((INTEGRAL_TYPE_P (lhs_type)
-          && (TYPE_PRECISION (lhs_type)
-              != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
-         || (INTEGRAL_TYPE_P (rhs_type)
-             && (TYPE_PRECISION (rhs_type)
-                 != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
+      && (partial_integral_type_p (lhs_type)
+         || partial_integral_type_p (rhs_type)))
     {
       if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -4696,10 +4692,8 @@ vectorizable_assignment (gimple *stmt, g
   if ((CONVERT_EXPR_CODE_P (code)
        || code == VIEW_CONVERT_EXPR)
       && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
-      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
-          != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
-         || ((TYPE_PRECISION (TREE_TYPE (op))
-              != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
+      && (partial_integral_type_p (TREE_TYPE (scalar_dest))
+         || partial_integral_type_p (TREE_TYPE (op)))
       /* But a conversion that does not change the bit-pattern is ok.  */
       && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
            > TYPE_PRECISION (TREE_TYPE (op)))
@@ -4875,8 +4869,7 @@ vectorizable_shift (gimple *stmt, gimple
 
   scalar_dest = gimple_assign_lhs (stmt);
   vectype_out = STMT_VINFO_VECTYPE (stmt_info);
-  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
-      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+  if (partial_integral_type_p (TREE_TYPE (scalar_dest)))
     {
       if (dump_enabled_p ())
         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
