https://gcc.gnu.org/g:33386d14210aa6e5cc9e1d65652261fbfc087b95

commit r15-5549-g33386d14210aa6e5cc9e1d65652261fbfc087b95
Author: Kewen Lin <li...@linux.ibm.com>
Date:   Thu Nov 21 07:41:33 2024 +0000

    rs6000: Simplify some conditions or code related to TARGET_DIRECT_MOVE
    
    When I was making a patch to rework TARGET_P8_VECTOR, I
    noticed that there are some redundant checks and dead code
    related to TARGET_DIRECT_MOVE, so I made this patch as one
    separate preparatory patch; it consists of:
      - Check either TARGET_DIRECT_MOVE or TARGET_P8_VECTOR only
        according to the context, rather than checking both of
        them since they are actually the same (TARGET_DIRECT_MOVE
        is defined as TARGET_P8_VECTOR).
      - Simplify TARGET_VSX && TARGET_DIRECT_MOVE as
        TARGET_DIRECT_MOVE since direct move ensures VSX enabled.
      - Replace some TARGET_POWERPC64 && TARGET_DIRECT_MOVE as
        TARGET_DIRECT_MOVE_64BIT to simplify it.
      - Remove some dead code guarded with TARGET_DIRECT_MOVE
        but the condition never holds here.
    
    gcc/ChangeLog:
    
            * config/rs6000/rs6000.cc (rs6000_option_override_internal):
            Simplify TARGET_P8_VECTOR && TARGET_DIRECT_MOVE as TARGET_P8_VECTOR.
            (rs6000_output_move_128bit): Simplify TARGET_VSX &&
            TARGET_DIRECT_MOVE as TARGET_DIRECT_MOVE.
            * config/rs6000/rs6000.h (TARGET_XSCVDPSPN): Simplify conditions
            TARGET_DIRECT_MOVE || TARGET_P8_VECTOR as TARGET_P8_VECTOR.
            (TARGET_XSCVSPDPN): Likewise.
            (TARGET_DIRECT_MOVE_128): Simplify TARGET_DIRECT_MOVE &&
            TARGET_POWERPC64 as TARGET_DIRECT_MOVE_64BIT.
            (TARGET_VEXTRACTUB): Likewise.
            (TARGET_DIRECT_MOVE_64BIT): Simplify TARGET_P8_VECTOR &&
            TARGET_DIRECT_MOVE as TARGET_DIRECT_MOVE.
            * config/rs6000/rs6000.md (signbit<mode>2, @signbit<mode>2_dm,
            *signbit<mode>2_dm_mem, floatsi<mode>2_lfiwax,
            floatsi<SFDF:mode>2_lfiwax_<QHI:mode>_mem_zext,
            floatunssi<mode>2_lfiwzx, float<QHI:mode><SFDF:mode>2,
            *float<QHI:mode><SFDF:mode>2_internal,
            floatuns<QHI:mode><SFDF:mode>2,
            *floatuns<QHI:mode><SFDF:mode>2_internal, p8_mtvsrd_v16qidi2,
            p8_mtvsrd_df, p8_xxpermdi_<mode>, reload_vsx_from_gpr<mode>,
            p8_mtvsrd_sf, reload_vsx_from_gprsf, p8_mfvsrd_3_<mode>,
            reload_gpr_from_vsx<mode>, reload_gpr_from_vsxsf, unpack<mode>_dm):
            Simplify TARGET_DIRECT_MOVE && TARGET_POWERPC64 as
            TARGET_DIRECT_MOVE_64BIT.
            (unpack<mode>_nodm): Simplify !TARGET_DIRECT_MOVE ||
            !TARGET_POWERPC64 as !TARGET_DIRECT_MOVE_64BIT.
            (fix_trunc<mode>si2, fix_trunc<mode>si2_stfiwx,
            fix_trunc<mode>si2_internal): Simplify TARGET_P8_VECTOR &&
            TARGET_DIRECT_MOVE as TARGET_DIRECT_MOVE.
            (fix_trunc<mode>si2_stfiwx, fixuns_trunc<mode>si2_stfiwx): Remove
            some dead code as the guard TARGET_DIRECT_MOVE there never holds.
            (fixuns_trunc<mode>si2_stfiwx): Change TARGET_P8_VECTOR with
            TARGET_DIRECT_MOVE which is a better fit.
            * config/rs6000/vsx.md (define_peephole2 for SFmode in GPR):
            Simplify TARGET_DIRECT_MOVE && TARGET_POWERPC64 as
            TARGET_DIRECT_MOVE_64BIT.

Diff:
---
 gcc/config/rs6000/rs6000.cc |  4 +--
 gcc/config/rs6000/rs6000.h  | 11 +++-----
 gcc/config/rs6000/rs6000.md | 62 ++++++++++++++++++---------------------------
 gcc/config/rs6000/vsx.md    |  2 +-
 4 files changed, 32 insertions(+), 47 deletions(-)

diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
index 0d7ee1e5bdf2..9cdf704824ce 100644
--- a/gcc/config/rs6000/rs6000.cc
+++ b/gcc/config/rs6000/rs6000.cc
@@ -4055,7 +4055,7 @@ rs6000_option_override_internal (bool global_init_p)
      support. If we only have ISA 2.06 support, and the user did not specify
      the switch, leave it set to -1 so the movmisalign patterns are enabled,
      but we don't enable the full vectorization support  */
-  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
+  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR)
     TARGET_ALLOW_MOVMISALIGN = 1;
 
   else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
@@ -13799,7 +13799,7 @@ rs6000_output_move_128bit (rtx operands[])
                    ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
                    : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
 
-         else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
+         else if (TARGET_DIRECT_MOVE && src_vsx_p)
            return "#";
        }
 
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index d460eb065448..e0c41e1dfd26 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -469,13 +469,11 @@ extern int rs6000_vector_align[];
 
 /* TARGET_DIRECT_MOVE is redundant to TARGET_P8_VECTOR, so alias it to that.  */
 #define TARGET_DIRECT_MOVE     TARGET_P8_VECTOR
-#define TARGET_XSCVDPSPN       (TARGET_DIRECT_MOVE || TARGET_P8_VECTOR)
-#define TARGET_XSCVSPDPN       (TARGET_DIRECT_MOVE || TARGET_P8_VECTOR)
+#define TARGET_XSCVDPSPN       TARGET_P8_VECTOR
+#define TARGET_XSCVSPDPN       TARGET_P8_VECTOR
 #define TARGET_VADDUQM         (TARGET_P8_VECTOR && TARGET_POWERPC64)
-#define TARGET_DIRECT_MOVE_128 (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE \
-                                && TARGET_POWERPC64)
-#define TARGET_VEXTRACTUB      (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE \
-                                && TARGET_POWERPC64)
+#define TARGET_DIRECT_MOVE_128 (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT)
+#define TARGET_VEXTRACTUB      (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT)
 
 /* Whether we should avoid (SUBREG:SI (REG:SF) and (SUBREG:SF (REG:SI).  */
 #define TARGET_NO_SF_SUBREG    TARGET_DIRECT_MOVE_64BIT
@@ -555,7 +553,6 @@ extern int rs6000_vector_align[];
    the calculation in 64-bit GPRs and then is transfered to the vector
    registers.  */
 #define TARGET_DIRECT_MOVE_64BIT       (TARGET_DIRECT_MOVE             \
-                                        && TARGET_P8_VECTOR            \
                                         && TARGET_POWERPC64)
 
 /* Inlining allows targets to define the meanings of bits in target_info
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 8eda2f7bb0d7..2598059280bf 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -5296,7 +5296,7 @@
        (match_dup 6))]
   "TARGET_HARD_FLOAT
    && (!FLOAT128_IEEE_P (<MODE>mode)
-       || (TARGET_POWERPC64 && TARGET_DIRECT_MOVE))"
+       || TARGET_DIRECT_MOVE_64BIT)"
 {
   if (FLOAT128_IEEE_P (<MODE>mode))
     {
@@ -5339,7 +5339,7 @@
   [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
        (unspec:DI [(match_operand:SIGNBIT 1 "gpc_reg_operand" "wa,r")]
                   UNSPEC_SIGNBIT))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "@
    mfvsrd %0,%x1
    #"
@@ -5358,7 +5358,7 @@
   [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
        (unspec:DI [(match_operand:SIGNBIT 1 "memory_operand" "m")]
                   UNSPEC_SIGNBIT))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& 1"
   [(set (match_dup 0)
@@ -5872,7 +5872,7 @@
   rtx src = operands[1];
   rtx tmp;
 
-  if (!MEM_P (src) && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
+  if (!MEM_P (src) && TARGET_DIRECT_MOVE_64BIT)
     tmp = convert_to_mode (DImode, src, false);
   else
     {
@@ -5928,7 +5928,7 @@
          (match_operand:QHI 1 "indexed_or_indirect_operand" "Z,Z"))))
    (clobber (match_scratch:DI 2 "=d,wa"))]
   "TARGET_HARD_FLOAT && <SI_CONVERT_FP> && TARGET_P9_VECTOR
-   && TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+   && TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& 1"
   [(pc)]
@@ -5968,7 +5968,7 @@
   rtx src = operands[1];
   rtx tmp;
 
-  if (!MEM_P (src) && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
+  if (!MEM_P (src) && TARGET_DIRECT_MOVE_64BIT)
     tmp = convert_to_mode (DImode, src, true);
   else
     {
@@ -6187,7 +6187,7 @@
              (clobber (match_scratch:DI 2))
              (clobber (match_scratch:DI 3))
              (clobber (match_scratch:<QHI:MODE> 4))])]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
 {
   if (MEM_P (operands[1]))
     operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
@@ -6200,7 +6200,7 @@
    (clobber (match_scratch:DI 2 "=v,wa,v"))
    (clobber (match_scratch:DI 3 "=X,r,X"))
    (clobber (match_scratch:<QHI:MODE> 4 "=X,X,v"))]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
@@ -6240,7 +6240,7 @@
                    (match_operand:QHI 1 "input_operand")))
              (clobber (match_scratch:DI 2))
              (clobber (match_scratch:DI 3))])]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
 {
   if (MEM_P (operands[1]))
     operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
@@ -6252,7 +6252,7 @@
         (match_operand:QHI 1 "reg_or_indexed_operand" "v,r,Z")))
    (clobber (match_scratch:DI 2 "=v,wa,wa"))
    (clobber (match_scratch:DI 3 "=X,r,X"))]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
@@ -6285,7 +6285,7 @@
        (fix:SI (match_operand:SFDF 1 "gpc_reg_operand")))]
   "TARGET_HARD_FLOAT"
 {
-  if (!(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE))
+  if (!TARGET_DIRECT_MOVE)
     {
       rtx src = force_reg (<MODE>mode, operands[1]);
 
@@ -6310,7 +6310,7 @@
        (fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d")))
    (clobber (match_scratch:DI 2 "=d"))]
   "TARGET_HARD_FLOAT && TARGET_STFIWX && can_create_pseudo_p ()
-   && !(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)"
+   && !TARGET_DIRECT_MOVE"
   "#"
   "&& 1"
   [(pc)]
@@ -6329,12 +6329,6 @@
       emit_insn (gen_stfiwx (dest, tmp));
       DONE;
     }
-  else if (TARGET_POWERPC64 && TARGET_DIRECT_MOVE && !MEM_P (dest))
-    {
-      dest = gen_lowpart (DImode, dest);
-      emit_move_insn (dest, tmp);
-      DONE;
-    }
   else
     {
       rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
@@ -6352,7 +6346,7 @@
    (clobber (match_operand:DI 2 "gpc_reg_operand" "=1,d"))
    (clobber (match_operand:DI 3 "offsettable_mem_operand" "=o,o"))]
   "TARGET_HARD_FLOAT
-   && !(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)"
+   && !TARGET_DIRECT_MOVE"
   "#"
   "&& 1"
   [(pc)]
@@ -6458,7 +6452,7 @@
    (clobber (match_scratch:DI 2 "=d"))]
   "TARGET_HARD_FLOAT && TARGET_FCTIWUZ
    && TARGET_STFIWX && can_create_pseudo_p ()
-   && !TARGET_P8_VECTOR"
+   && !TARGET_DIRECT_MOVE"
   "#"
   "&& 1"
   [(pc)]
@@ -6477,12 +6471,6 @@
       emit_insn (gen_stfiwx (dest, tmp));
       DONE;
     }
-  else if (TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
-    {
-      dest = gen_lowpart (DImode, dest);
-      emit_move_insn (dest, tmp);
-      DONE;
-    }
   else
     {
       rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
@@ -9572,7 +9560,7 @@
   [(set (match_operand:V16QI 0 "register_operand" "=wa")
     (unspec:V16QI [(match_operand:DI 1 "register_operand" "r")]
                  UNSPEC_P8V_MTVSRD))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mtvsrd %x0,%1"
   [(set_attr "type" "mtvsr")])
 
@@ -9606,7 +9594,7 @@
   [(set (match_operand:DF 0 "register_operand" "=wa")
        (unspec:DF [(match_operand:DI 1 "register_operand" "r")]
                   UNSPEC_P8V_MTVSRD))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mtvsrd %x0,%1"
   [(set_attr "type" "mtvsr")])
 
@@ -9616,7 +9604,7 @@
                (match_operand:DF 1 "register_operand" "wa")
                (match_operand:DF 2 "register_operand" "wa")]
                UNSPEC_P8V_XXPERMDI))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "xxpermdi %x0,%x1,%x2,0"
   [(set_attr "type" "vecperm")])
 
@@ -9626,7 +9614,7 @@
         [(match_operand:FMOVE128_GPR 1 "register_operand" "r")]
         UNSPEC_P8V_RELOAD_FROM_GPR))
    (clobber (match_operand:IF 2 "register_operand" "=wa"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
@@ -9671,7 +9659,7 @@
   [(set (match_operand:SF 0 "register_operand" "=wa")
        (unspec:SF [(match_operand:DI 1 "register_operand" "r")]
                   UNSPEC_P8V_MTVSRD))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mtvsrd %x0,%1"
   [(set_attr "type" "mtvsr")])
 
@@ -9680,7 +9668,7 @@
        (unspec:SF [(match_operand:SF 1 "register_operand" "r")]
                   UNSPEC_P8V_RELOAD_FROM_GPR))
    (clobber (match_operand:DI 2 "register_operand" "=r"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
@@ -9706,7 +9694,7 @@
   [(set (match_operand:DF 0 "register_operand" "=r")
        (unspec:DF [(match_operand:FMOVE128_GPR 1 "register_operand" "wa")]
                   UNSPEC_P8V_RELOAD_FROM_VSX))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mfvsrd %0,%x1"
   [(set_attr "type" "mfvsr")])
 
@@ -9716,7 +9704,7 @@
         [(match_operand:FMOVE128_GPR 1 "register_operand" "wa")]
         UNSPEC_P8V_RELOAD_FROM_VSX))
    (clobber (match_operand:FMOVE128_GPR 2 "register_operand" "=wa"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
@@ -9744,7 +9732,7 @@
        (unspec:SF [(match_operand:SF 1 "register_operand" "wa")]
                   UNSPEC_P8V_RELOAD_FROM_VSX))
    (clobber (match_operand:V4SF 2 "register_operand" "=wa"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
@@ -14873,7 +14861,7 @@
         [(match_operand:FMOVE128 1 "register_operand" "d,d,r,d,r")
          (match_operand:QI 2 "const_0_to_1_operand" "i,i,i,i,i")]
         UNSPEC_UNPACK_128BIT))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE && FLOAT128_2REG_P (<MODE>mode)"
+  "TARGET_DIRECT_MOVE_64BIT && FLOAT128_2REG_P (<MODE>mode)"
   "#"
   "&& reload_completed"
   [(set (match_dup 0) (match_dup 3))]
@@ -14896,7 +14884,7 @@
         [(match_operand:FMOVE128 1 "register_operand" "d,d,r")
          (match_operand:QI 2 "const_0_to_1_operand" "i,i,i")]
         UNSPEC_UNPACK_128BIT))]
-  "(!TARGET_POWERPC64 || !TARGET_DIRECT_MOVE) && FLOAT128_2REG_P (<MODE>mode)"
+  "!TARGET_DIRECT_MOVE_64BIT && FLOAT128_2REG_P (<MODE>mode)"
   "#"
   "&& reload_completed"
   [(set (match_dup 0) (match_dup 3))]
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index b2fc39acf4e8..f4f7113f5fe8 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -6338,7 +6338,7 @@
    (set (match_operand:SF SFBOOL_MTVSR_D "vsx_register_operand")
        (unspec:SF [(match_dup SFBOOL_SHL_D)] UNSPEC_P8V_MTVSRD))]
 
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE
+  "TARGET_DIRECT_MOVE_64BIT
    /* The REG_P (xxx) tests prevents SUBREG's, which allows us to use REGNO
       to compare registers, when the mode is different.  */
    && REG_P (operands[SFBOOL_MFVSR_D]) && REG_P (operands[SFBOOL_BOOL_D])

Reply via email to