Hello!

This patch extends the existing shiftrt code attributes in i386.md to
also handle ashift, so they can be shared by the AVX2 variable shift
patterns in sse.md.  It also includes some stylistic changes in the
XOP shift patterns.

No functional changes.
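
To make the rename concrete, here is how the extended attributes are
meant to be used (illustration only, abridged from the hunks below;
the shift_insn attribute itself lives in i386.md):

  ;; One code iterator covers both vector shift directions ...
  (define_code_iterator any_lshift [ashift lshiftrt])

  ;; ... and the shared code attribute supplies the pattern base name.
  (define_code_attr shift_insn
    [(ashift "ashl") (lshiftrt "lshr") (ashiftrt "ashr")])

  ;; A single template such as avx2_<shift_insn>v<mode> is then
  ;; instantiated by the gen* tools once per code in any_lshift, e.g.
  ;; avx2_ashlvv4si and avx2_lshrvv4si for V4SImode -- exactly the
  ;; CODE_FOR_avx2_ashlv*/CODE_FOR_avx2_lshrv* names that the builtin
  ;; table in i386.c now uses.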

2011-10-28  Uros Bizjak  <ubiz...@gmail.com>

        * config/i386/i386.md (shift_insn): Rename code attribute from
        shiftrt_insn.  Also handle ashift RTX.
        (shift): Rename code attribute from shiftrt.  Also handle ashift RTX.
        (<shift_insn>*): Rename from <shiftrt_insn>*.  Update asm templates.
        * config/i386/sse.md (any_lshift): Rename code iterator from lshift.
        (lshift_insn): Remove code attribute.
        (lshift): Remove code attribute.
        (vlshr<mode>3): Use lshiftrt RTX.
        (vashr<mode>3): Use ashiftrt RTX.
        (vashl<mode>3): Use ashift RTX.
        (avx2_<shift_insn>v<mode>): Rename from avx2_<lshift>v<mode>.  Use
        any_lshift code iterator.  Update asm template.
        * config/i386/i386.c (bdesc_args) <__builtin_ia32_psllv*>: Update.

The patch was bootstrapped and regression tested on x86_64-pc-linux-gnu
{,-m32} and committed to mainline SVN.

Uros.
Index: i386.md
===================================================================
--- i386.md     (revision 180619)
+++ i386.md     (working copy)
@@ -776,10 +776,11 @@
 (define_code_iterator any_shiftrt [lshiftrt ashiftrt])
 
 ;; Base name for define_insn
-(define_code_attr shiftrt_insn [(lshiftrt "lshr") (ashiftrt "ashr")])
+(define_code_attr shift_insn
+  [(ashift "ashl") (lshiftrt "lshr") (ashiftrt "ashr")])
 
 ;; Base name for insn mnemonic.
-(define_code_attr shiftrt [(lshiftrt "shr") (ashiftrt "sar")])
+(define_code_attr shift [(ashift "sll") (lshiftrt "shr") (ashiftrt "sar")])
 
 ;; Mapping of rotate operators
 (define_code_iterator any_rotate [rotate rotatert])
@@ -9579,7 +9580,7 @@
 
 ;; See comment above `ashl<mode>3' about how this works.
 
-(define_expand "<shiftrt_insn><mode>3"
+(define_expand "<shift_insn><mode>3"
   [(set (match_operand:SDWIM 0 "<shift_operand>" "")
        (any_shiftrt:SDWIM (match_operand:SDWIM 1 "<shift_operand>" "")
                           (match_operand:QI 2 "nonmemory_operand" "")))]
@@ -9587,7 +9588,7 @@
   "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
 
 ;; Avoid useless masking of count operand.
-(define_insn_and_split "*<shiftrt_insn><mode>3_mask"
+(define_insn_and_split "*<shift_insn><mode>3_mask"
   [(set (match_operand:SWI48 0 "nonimmediate_operand" "=rm")
        (any_shiftrt:SWI48
          (match_operand:SWI48 1 "nonimmediate_operand" "0")
@@ -9613,7 +9614,7 @@
   [(set_attr "type" "ishift")
    (set_attr "mode" "<MODE>")])
 
-(define_insn_and_split "*<shiftrt_insn><mode>3_doubleword"
+(define_insn_and_split "*<shift_insn><mode>3_doubleword"
   [(set (match_operand:DWI 0 "register_operand" "=r")
        (any_shiftrt:DWI (match_operand:DWI 1 "register_operand" "0")
                         (match_operand:QI 2 "nonmemory_operand" "<S>c")))
@@ -9622,7 +9623,7 @@
   "#"
   "(optimize && flag_peephole2) ? epilogue_completed : reload_completed"
   [(const_int 0)]
-  "ix86_split_<shiftrt_insn> (operands, NULL_RTX, <MODE>mode); DONE;"
+  "ix86_split_<shift_insn> (operands, NULL_RTX, <MODE>mode); DONE;"
   [(set_attr "type" "multi")])
 
 ;; By default we don't ask for a scratch register, because when DWImode
@@ -9639,7 +9640,7 @@
    (match_dup 3)]
   "TARGET_CMOVE"
   [(const_int 0)]
-  "ix86_split_<shiftrt_insn> (operands, operands[3], <DWI>mode); DONE;")
+  "ix86_split_<shift_insn> (operands, operands[3], <DWI>mode); DONE;")
 
 (define_insn "x86_64_shrd"
   [(set (match_operand:DI 0 "nonimmediate_operand" "+r*m")
@@ -9755,16 +9756,16 @@
   DONE;
 })
 
-(define_insn "*bmi2_<shiftrt_insn><mode>3_1"
+(define_insn "*bmi2_<shift_insn><mode>3_1"
   [(set (match_operand:SWI48 0 "register_operand" "=r")
        (any_shiftrt:SWI48 (match_operand:SWI48 1 "nonimmediate_operand" "rm")
                           (match_operand:SWI48 2 "register_operand" "r")))]
   "TARGET_BMI2"
-  "<shiftrt>x\t{%2, %1, %0|%0, %1, %2}"
+  "<shift>x\t{%2, %1, %0|%0, %1, %2}"
   [(set_attr "type" "ishiftx")
    (set_attr "mode" "<MODE>")])
 
-(define_insn "*<shiftrt_insn><mode>3_1"
+(define_insn "*<shift_insn><mode>3_1"
   [(set (match_operand:SWI48 0 "nonimmediate_operand" "=rm,r")
        (any_shiftrt:SWI48
          (match_operand:SWI48 1 "nonimmediate_operand" "0,rm")
@@ -9780,9 +9781,9 @@
     default:
       if (operands[2] == const1_rtx
          && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-       return "<shiftrt>{<imodesuffix>}\t%0";
+       return "<shift>{<imodesuffix>}\t%0";
       else
-       return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+       return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
     }
 }
   [(set_attr "isa" "*,bmi2")
@@ -9807,17 +9808,17 @@
        (any_shiftrt:SWI48 (match_dup 1) (match_dup 2)))]
   "operands[2] = gen_lowpart (<MODE>mode, operands[2]);")
 
-(define_insn "*bmi2_<shiftrt_insn>si3_1_zext"
+(define_insn "*bmi2_<shift_insn>si3_1_zext"
   [(set (match_operand:DI 0 "register_operand" "=r")
        (zero_extend:DI
          (any_shiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "rm")
                          (match_operand:SI 2 "register_operand" "r"))))]
   "TARGET_64BIT && TARGET_BMI2"
-  "<shiftrt>x\t{%2, %1, %k0|%k0, %1, %2}"
+  "<shift>x\t{%2, %1, %k0|%k0, %1, %2}"
   [(set_attr "type" "ishiftx")
    (set_attr "mode" "SI")])
 
-(define_insn "*<shiftrt_insn>si3_1_zext"
+(define_insn "*<shift_insn>si3_1_zext"
   [(set (match_operand:DI 0 "register_operand" "=r,r")
        (zero_extend:DI
          (any_shiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0,rm")
@@ -9833,9 +9834,9 @@
     default:
       if (operands[2] == const1_rtx
          && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-       return "<shiftrt>{l}\t%k0";
+       return "<shift>{l}\t%k0";
       else
-       return "<shiftrt>{l}\t{%2, %k0|%k0, %2}";
+       return "<shift>{l}\t{%2, %k0|%k0, %2}";
     }
 }
   [(set_attr "isa" "*,bmi2")
@@ -9861,7 +9862,7 @@
        (zero_extend:DI (any_shiftrt:SI (match_dup 1) (match_dup 2))))]
   "operands[2] = gen_lowpart (SImode, operands[2]);")
 
-(define_insn "*<shiftrt_insn><mode>3_1"
+(define_insn "*<shift_insn><mode>3_1"
   [(set (match_operand:SWI12 0 "nonimmediate_operand" "=<r>m")
        (any_shiftrt:SWI12
          (match_operand:SWI12 1 "nonimmediate_operand" "0")
@@ -9871,9 +9872,9 @@
 {
   if (operands[2] == const1_rtx
       && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<shiftrt>{<imodesuffix>}\t%0";
+    return "<shift>{<imodesuffix>}\t%0";
   else
-    return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+    return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
 }
   [(set_attr "type" "ishift")
    (set (attr "length_immediate")
@@ -9885,7 +9886,7 @@
        (const_string "*")))
    (set_attr "mode" "<MODE>")])
 
-(define_insn "*<shiftrt_insn>qi3_1_slp"
+(define_insn "*<shift_insn>qi3_1_slp"
   [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
        (any_shiftrt:QI (match_dup 0)
                        (match_operand:QI 1 "nonmemory_operand" "cI")))
@@ -9897,9 +9898,9 @@
 {
   if (operands[1] == const1_rtx
       && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<shiftrt>{b}\t%0";
+    return "<shift>{b}\t%0";
   else
-    return "<shiftrt>{b}\t{%1, %0|%0, %1}";
+    return "<shift>{b}\t{%1, %0|%0, %1}";
 }
   [(set_attr "type" "ishift1")
    (set (attr "length_immediate")
@@ -9914,7 +9915,7 @@
 ;; This pattern can't accept a variable shift count, since shifts by
 ;; zero don't affect the flags.  We assume that shifts by constant
 ;; zero are optimized away.
-(define_insn "*<shiftrt_insn><mode>3_cmp"
+(define_insn "*<shift_insn><mode>3_cmp"
   [(set (reg FLAGS_REG)
        (compare
          (any_shiftrt:SWI
@@ -9932,9 +9933,9 @@
 {
   if (operands[2] == const1_rtx
       && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<shiftrt>{<imodesuffix>}\t%0";
+    return "<shift>{<imodesuffix>}\t%0";
   else
-    return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+    return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
 }
   [(set_attr "type" "ishift")
    (set (attr "length_immediate")
@@ -9946,7 +9947,7 @@
        (const_string "*")))
    (set_attr "mode" "<MODE>")])
 
-(define_insn "*<shiftrt_insn>si3_cmp_zext"
+(define_insn "*<shift_insn>si3_cmp_zext"
   [(set (reg FLAGS_REG)
        (compare
          (any_shiftrt:SI (match_operand:SI 1 "register_operand" "0")
@@ -9964,9 +9965,9 @@
 {
   if (operands[2] == const1_rtx
       && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<shiftrt>{l}\t%k0";
+    return "<shift>{l}\t%k0";
   else
-    return "<shiftrt>{l}\t{%2, %k0|%k0, %2}";
+    return "<shift>{l}\t{%2, %k0|%k0, %2}";
 }
   [(set_attr "type" "ishift")
    (set (attr "length_immediate")
@@ -9978,7 +9979,7 @@
        (const_string "*")))
    (set_attr "mode" "SI")])
 
-(define_insn "*<shiftrt_insn><mode>3_cconly"
+(define_insn "*<shift_insn><mode>3_cconly"
   [(set (reg FLAGS_REG)
        (compare
          (any_shiftrt:SWI
@@ -9994,9 +9995,9 @@
 {
   if (operands[2] == const1_rtx
       && (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
-    return "<shiftrt>{<imodesuffix>}\t%0";
+    return "<shift>{<imodesuffix>}\t%0";
   else
-    return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+    return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
 }
   [(set_attr "type" "ishift")
    (set (attr "length_immediate")
Index: sse.md
===================================================================
--- sse.md      (revision 180619)
+++ sse.md      (working copy)
@@ -168,14 +168,8 @@
    (V2DI "vec") (V4DI "avx2")])
 
 ;; Mapping of logic-shift operators
-(define_code_iterator lshift [lshiftrt ashift])
+(define_code_iterator any_lshift [ashift lshiftrt])
 
-;; Base name for define_insn
-(define_code_attr lshift_insn [(lshiftrt "srl") (ashift "sll")])
-
-;; Base name for insn mnemonic
-(define_code_attr lshift [(lshiftrt "lshr") (ashift "lshl")])
-
 (define_mode_attr ssedoublemode
   [(V16HI "V16SI") (V8HI "V8SI")])
 
@@ -11270,9 +11264,10 @@
 
 ;; XOP packed shift instructions.
 (define_expand "vlshr<mode>3"
-  [(match_operand:VI12_128 0 "register_operand" "")
-   (match_operand:VI12_128 1 "register_operand" "")
-   (match_operand:VI12_128 2 "nonimmediate_operand" "")]
+  [(set (match_operand:VI12_128 0 "register_operand" "")
+       (lshiftrt:VI12_128
+         (match_operand:VI12_128 1 "register_operand" "")
+         (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
   "TARGET_XOP"
 {
   rtx neg = gen_reg_rtx (<MODE>mode);
@@ -11305,9 +11300,10 @@
   "TARGET_AVX2")
 
 (define_expand "vashr<mode>3"
-  [(match_operand:VI128_128 0 "register_operand" "")
-   (match_operand:VI128_128 1 "register_operand" "")
-   (match_operand:VI128_128 2 "nonimmediate_operand" "")]
+  [(set (match_operand:VI128_128 0 "register_operand" "")
+       (ashiftrt:VI128_128
+         (match_operand:VI128_128 1 "register_operand" "")
+         (match_operand:VI128_128 2 "nonimmediate_operand" "")))]
   "TARGET_XOP"
 {
   rtx neg = gen_reg_rtx (<MODE>mode);
@@ -11338,9 +11334,10 @@
   "TARGET_AVX2")
 
 (define_expand "vashl<mode>3"
-  [(match_operand:VI12_128 0 "register_operand" "")
-   (match_operand:VI12_128 1 "register_operand" "")
-   (match_operand:VI12_128 2 "register_operand" "")]
+  [(set (match_operand:VI12_128 0 "register_operand" "")
+       (ashift:VI12_128
+         (match_operand:VI12_128 1 "register_operand" "")
+         (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
   "TARGET_XOP"
 {
   emit_insn (gen_xop_ashl<mode>3 (operands[0], operands[1], operands[2]));
@@ -12472,22 +12469,22 @@
 
 (define_insn "avx2_ashrv<mode>"
   [(set (match_operand:VI4_AVX2 0 "register_operand" "=x")
-       (ashiftrt:VI4_AVX2 (match_operand:VI4_AVX2 1 "register_operand" "x")
-                          (match_operand:VI4_AVX2 2 "nonimmediate_operand"
-                                                    "xm")))]
+       (ashiftrt:VI4_AVX2
+         (match_operand:VI4_AVX2 1 "register_operand" "x")
+         (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xm")))]
   "TARGET_AVX2"
   "vpsravd\t{%2, %1, %0|%0, %1, %2}"
   [(set_attr "type" "sseishft")
    (set_attr "prefix" "vex")
    (set_attr "mode" "<sseinsnmode>")])
 
-(define_insn "avx2_<lshift>v<mode>"
+(define_insn "avx2_<shift_insn>v<mode>"
   [(set (match_operand:VI48_AVX2 0 "register_operand" "=x")
-       (lshift:VI48_AVX2 (match_operand:VI48_AVX2 1 "register_operand" "x")
-                         (match_operand:VI48_AVX2 2 "nonimmediate_operand"
-                                                    "xm")))]
+       (any_lshift:VI48_AVX2
+         (match_operand:VI48_AVX2 1 "register_operand" "x")
+         (match_operand:VI48_AVX2 2 "nonimmediate_operand" "xm")))]
   "TARGET_AVX2"
-  "vp<lshift_insn>v<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+  "vp<shift>v<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
   [(set_attr "type" "sseishft")
    (set_attr "prefix" "vex")
    (set_attr "mode" "<sseinsnmode>")])
Index: i386.c
===================================================================
--- i386.c      (revision 180619)
+++ i386.c      (working copy)
@@ -26366,10 +26366,10 @@ static const struct builtin_description bdesc_args
   { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv2ti, "__builtin_ia32_permti256", IX86_BUILTIN_VPERMTI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT },
   { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_extracti128, "__builtin_ia32_extract128i256", IX86_BUILTIN_VEXTRACT128I256, UNKNOWN, (int) V2DI_FTYPE_V4DI_INT },
   { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_inserti128, "__builtin_ia32_insert128i256", IX86_BUILTIN_VINSERT128I256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_INT },
-  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
-  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
-  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
-  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
+  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
+  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
+  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
+  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
   { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv8si, "__builtin_ia32_psrav8si", IX86_BUILTIN_PSRAVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
   { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv4si, "__builtin_ia32_psrav4si", IX86_BUILTIN_PSRAVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
   { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4di, "__builtin_ia32_psrlv4di", IX86_BUILTIN_PSRLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
