Hi All,

This adds a way to generate special sequences for constants for which we have
no single-instruction encoding and which would otherwise have required a
GP -> FP transfer or a literal load.

The patch starts out by adding support for creating 1 << 63 using fneg (mov 0):
fneg only flips the sign bit of each lane, so negating a zeroed register yields
a value with just the top bit set.
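
For illustration, here is the effect on the f2 testcase from fneg-abs_2.c
below (register numbers are representative only):

    float64_t f2 (float64_t a)
    {
      return -fabs (a);
    }

Before this patch the mask was materialized through a GP -> FP transfer:

    mov     x0, -9223372036854775808
    fmov    d31, x0

(or a literal load in the vector case), whereas now it is built entirely on
the FP side:

    fmov    d31, xzr
    fneg    v31.2d, v31.2d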

Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

        PR tree-optimization/109154
        * config/aarch64/aarch64-protos.h (aarch64_simd_special_constant_p):
        New.
        * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VQMOV:mode>): Add
        new codegen for special constants.
        * config/aarch64/aarch64.cc (aarch64_extract_vec_duplicate_wide_int):
        Take optional mode.
        (aarch64_simd_special_constant_p): New.
        * config/aarch64/aarch64.md (*movdi_aarch64): Add new codegen for
        special constants.
        * config/aarch64/constraints.md (Dx): New.

gcc/testsuite/ChangeLog:

        PR tree-optimization/109154
        * gcc.target/aarch64/fneg-abs_1.c: Updated.
        * gcc.target/aarch64/fneg-abs_2.c: Updated.
        * gcc.target/aarch64/fneg-abs_4.c: Updated.

--- inline copy of patch -- 
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 70303d6fd953e0c397b9138ede8858c2db2e53db..2af9f6a774c20268bf90756c17064bbff8f8ff87 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -827,6 +827,7 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
 bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
                        enum simd_immediate_check w = AARCH64_CHECK_MOV);
 rtx aarch64_check_zero_based_sve_index_immediate (rtx);
+bool aarch64_simd_special_constant_p (rtx, rtx, machine_mode);
 bool aarch64_sve_index_immediate_p (rtx);
 bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
 bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 7b4d5a37a9795fefda785aaacc246918826ed0a2..63c802d942a186b5a94c66d2e83828a82a88ffa8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -181,17 +181,28 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
      [?r , r ; multiple           , *   , 8] #
     [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
      [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
+     [w  , Dx; neon_move          , simd, 8] #
   }
   "&& reload_completed
-   && !(FP_REGNUM_P (REGNO (operands[0]))
-       && FP_REGNUM_P (REGNO (operands[1])))"
+   && (!(FP_REGNUM_P (REGNO (operands[0]))
+        && FP_REGNUM_P (REGNO (operands[1])))
+       || (aarch64_simd_special_constant_p (operands[1], NULL_RTX, <MODE>mode)
+          && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
     if (GP_REGNUM_P (REGNO (operands[0]))
        && GP_REGNUM_P (REGNO (operands[1])))
       aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
     else
-      aarch64_split_simd_move (operands[0], operands[1]);
+      {
+       if (FP_REGNUM_P (REGNO (operands[0]))
+           && <MODE>mode == V2DImode
+           && aarch64_simd_special_constant_p (operands[1], operands[0],
+                                               <MODE>mode))
+         ;
+       else
+         aarch64_split_simd_move (operands[0], operands[1]);
+      }
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 3739a44bfd909b69a76529cc6b0ae2f01d6fb36e..6e7ee446f1b31ee8bcf121c97c1c6fa87725bf42 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -11799,16 +11799,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
 /* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
    duplicate of such constants.  If so, store in RET_WI the wide_int
    representation of the constant paired with the inner mode of the vector mode
-   or TImode for scalar X constants.  */
+   or MODE for scalar X constants.  If MODE is not provided then TImode is
+   used.  */
 
 static bool
-aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
+aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
+                                       scalar_mode mode = TImode)
 {
   rtx elt = unwrap_const_vec_duplicate (x);
   if (!CONST_SCALAR_INT_P (elt))
     return false;
   scalar_mode smode
-    = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
+    = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
   *ret_wi = rtx_mode_t (elt, smode);
   return true;
 }
@@ -11857,6 +11859,43 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
          && IN_RANGE (INTVAL (elt), minval, maxval));
 }
 
+/* Some constants can't be made using normal mov instructions in Advanced SIMD
+   but we can still create them in various ways.  Return true if the constant
+   in VAL can be created using such an alternative sequence.  MODE is the mode
+   to use when VAL itself carries none.  If TARGET is non-null, also emit the
+   sequence that materializes the constant into TARGET.  */
+
+bool
+aarch64_simd_special_constant_p (rtx val, rtx target, machine_mode mode)
+{
+  wide_int wval;
+  machine_mode tmode = GET_MODE (val);
+  auto smode = GET_MODE_INNER (tmode != VOIDmode ? tmode : mode);
+  if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
+    return false;
+
+  /* For Advanced SIMD we can create an integer with only the top bit set
+     using fneg (0.0f).  */
+  if (TARGET_SIMD
+      && !TARGET_SVE
+      && smode == DImode
+      && wi::only_sign_bit_p (wval))
+    {
+      if (!target)
+       return true;
+
+      /* Use the same base type as aarch64_gen_shareable_zero.  */
+      rtx zero = CONST0_RTX (V4SImode);
+      emit_move_insn (target, lowpart_subreg (mode, zero, V4SImode));
+      rtx neg = lowpart_subreg (V2DFmode, target, mode);
+      emit_insn (gen_negv2df2 (neg, lowpart_subreg (V2DFmode, target, mode)));
+      emit_move_insn (target, lowpart_subreg (mode, neg, V2DFmode));
+      return true;
+    }
+
+  return false;
+}
+
 bool
 aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
 {
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 634cfd33b41d0f945ca00d8efc9eff1ede490544..b51f979dba12b726bff0c1109b75c6d2c7ae41ab 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1340,13 +1340,21 @@ (define_insn_and_split "*movdi_aarch64"
      [r, w  ; f_mrc    , fp  , 4] fmov\t%x0, %d1
      [w, w  ; fmov     , fp  , 4] fmov\t%d0, %d1
     [w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
+     [w, Dx ; neon_move, simd, 8] #
   }
-  "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), 
DImode)
-   && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
+  "CONST_INT_P (operands[1])
+   && REG_P (operands[0])
+   && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
+       && GP_REGNUM_P (REGNO (operands[0])))
+       || (aarch64_simd_special_constant_p (operands[1], NULL_RTX, DImode)
+          && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
+    if (GP_REGNUM_P (REGNO (operands[0])))
       aarch64_expand_mov_immediate (operands[0], operands[1]);
-      DONE;
+    else
+      aarch64_simd_special_constant_p (operands[1], operands[0], DImode);
+    DONE;
   }
 )
 
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 371a00827d84d8ea4a06ba2b00a761d3b179ae90..11cf5a0d16b3364a7a4d0b2a2e5bb33063151479 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -488,6 +488,14 @@ (define_constraint "Dr"
  (and (match_code "const,const_vector")
       (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
                                                 false)")))
+
+(define_constraint "Dx"
+  "@internal
+ A constraint that matches 64-bit immediates which we have no single
+ instruction to create but which we can synthesize with a short sequence."
+ (and (match_code "const_int,const,const_vector")
+      (match_test "aarch64_simd_special_constant_p (op, NULL_RTX, DImode)")))
+
 (define_constraint "Dz"
   "@internal
  A constraint that matches a vector of immediate zero."
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
index f823013c3ddf6b3a266c3abfcbf2642fc2a75fa6..43c37e21b50e13c09b8d6850686e88465cd8482a 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
@@ -28,8 +28,8 @@ float32x4_t t2 (float32x4_t a)
 
 /*
 ** t3:
-**     adrp    x0, .LC[0-9]+
-**     ldr     q[0-9]+, \[x0, #:lo12:.LC0\]
+**     movi    v[0-9]+.4s, 0
+**     fneg    v[0-9]+.2d, v[0-9]+.2d
 **     orr     v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
 **     ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
index 141121176b309e4b2aa413dc55271a6e3c93d5e1..fb14ec3e2210e0feeff80f2410d777d3046a9f78 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
@@ -20,8 +20,8 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**     mov     x0, -9223372036854775808
-**     fmov    d[0-9]+, x0
+**     fmov    d[0-9]+, xzr
+**     fneg    v[0-9]+.2d, v[0-9]+.2d
 **     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **     ret
 */
@@ -29,3 +29,4 @@ float64_t f2 (float64_t a)
 {
   return -fabs (a);
 }
+
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
index 10879dea74462d34b26160eeb0bd54ead063166b..4ea0105f6c0a9756070bcc60d34f142f53d8242c 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
@@ -8,8 +8,8 @@
 
 /*
 ** negabs:
-**     mov     x0, -9223372036854775808
-**     fmov    d[0-9]+, x0
+**     fmov    d[0-9]+, xzr
+**     fneg    v[0-9]+.2d, v[0-9]+.2d
 **     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **     ret
 */
