This patch adds the following intrinsics:
- svcvt1_bf16[_mf8]_fpm
- svcvt1_f16[_mf8]_fpm
- svcvt2_bf16[_mf8]_fpm
- svcvt2_f16[_mf8]_fpm
- svcvtlt1_bf16[_mf8]_fpm
- svcvtlt1_f16[_mf8]_fpm
- svcvtlt2_bf16[_mf8]_fpm
- svcvtlt2_f16[_mf8]_fpm
- svcvtn_mf8[_bf16_x2]_fpm (unpredicated)
- svcvtn_mf8[_f16_x2]_fpm (unpredicated)
- svcvtnb_mf8[_f32_x2]_fpm
- svcvtnt_mf8[_f32_x2]_fpm

The underlying instructions require FEAT_FP8 and are available either when
SVE2 is enabled and the PE is not in streaming SVE mode, or when SME2 is
enabled and the PE is in streaming SVE mode.

gcc/
        * config/aarch64/aarch64-sve-builtins-shapes.cc
        (parse_signature): Add an fpm_t (uint64_t) argument to functions that
        set the fpm register.
        (unary_convert_narrowxn_fpm_def): New class.
        (unary_convert_narrowxn_fpm): New shape.
        (unary_convertxn_fpm_def): New class.
        (unary_convertxn_fpm): New shape.
        * config/aarch64/aarch64-sve-builtins-shapes.h
        (unary_convert_narrowxn_fpm): Declare.
        (unary_convertxn_fpm): Likewise.
        * config/aarch64/aarch64-sve-builtins-sve2.cc
        (svcvt_fp8_impl): New class.
        (svcvtn_impl): Handle fp8 cases.
        (svcvt1, svcvt2, svcvtlt1, svcvtlt2): Add new FUNCTION.
        (svcvtnb): Likewise.
        * config/aarch64/aarch64-sve-builtins-sve2.def
        (svcvt1, svcvt2, svcvtlt1, svcvtlt2): Add new DEF_SVE_FUNCTION_GS.
        (svcvtn): Likewise.
        (svcvtnb, svcvtnt): Likewise.
        * config/aarch64/aarch64-sve-builtins-sve2.h
        (svcvt1, svcvt2, svcvtlt1, svcvtlt2, svcvtnb, svcvtnt): Declare.
        * config/aarch64/aarch64-sve-builtins.cc
        (TYPES_cvt_mf8, TYPES_cvtn_mf8, TYPES_cvtnx_mf8): Add new type arrays.
        (function_builder::get_name): Append _fpm to functions that set fpmr.
        (function_resolver::check_gp_argument): Deal with the fpm_t argument.
        (function_expander::use_exact_insn): Set the fpm register before
        emitting the insn if the function requires it.
        * config/aarch64/aarch64-sve2.md (@aarch64_sve2_fp8_cvt): Add new.
        (@aarch64_sve2_fp8_cvtn): Likewise.
        (@aarch64_sve2_fp8_cvtnb): Likewise.
        (@aarch64_sve_cvtnt): Likewise.
        * config/aarch64/aarch64.h (TARGET_SSVE_FP8): Add new.
        * config/aarch64/iterators.md
        (VNx8SF_ONLY, SVE_FULL_HFx2): New mode iterators.
        (UNSPEC_F1CVT, UNSPEC_F1CVTLT, UNSPEC_F2CVT, UNSPEC_F2CVTLT): Add new.
        (UNSPEC_FCVTNB, UNSPEC_FCVTNT): Likewise.
        (UNSPEC_FP8FCVTN): Likewise.
        (FP8CVT_UNS, fp8_cvt_uns_op): Likewise.

gcc/testsuite/
        * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
        (TEST_DUAL_Z): Add fpm0 argument.
        * gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c:
        Add new tests.
        * gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c:
        Likewise.
        * gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c: Likewise.
        * gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c: Likewise.
        * gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c: Likewise.
        * lib/target-supports.exp: Add aarch64_asm_fp8_ok check.
---
 .../aarch64/aarch64-sve-builtins-shapes.cc    | 74 +++++++++++++++++++
 .../aarch64/aarch64-sve-builtins-shapes.h     |  2 +
 .../aarch64/aarch64-sve-builtins-sve2.cc      | 28 ++++++-
 .../aarch64/aarch64-sve-builtins-sve2.def     | 12 +++
 .../aarch64/aarch64-sve-builtins-sve2.h       |  6 ++
 gcc/config/aarch64/aarch64-sve-builtins.cc    | 30 +++++++-
 gcc/config/aarch64/aarch64-sve2.md            | 52 +++++++++++++
 gcc/config/aarch64/aarch64.h                  |  5 ++
 gcc/config/aarch64/iterators.md               | 25 +++++++
 .../aarch64/sve/acle/asm/test_sve_acle.h      |  2 +-
 .../general-c/unary_convert_narrowxn_fpm_1.c  | 38 ++++++++++
 .../acle/general-c/unary_convertxn_fpm_1.c    | 60 +++++++++++++++
 .../aarch64/sve2/acle/asm/cvt_mf8.c           | 48 ++++++++++++
 .../aarch64/sve2/acle/asm/cvtlt_mf8.c         | 47 ++++++++++++
 .../aarch64/sve2/acle/asm/cvtn_mf8.c          | 59 +++++++++++++++
 gcc/testsuite/lib/target-supports.exp         |  2 +-
 16 files changed, 485 insertions(+), 5 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c

diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
index 51f7cfdf96f..f08c377f5e4 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
@@ -325,6 +325,8 @@ parse_signature (const function_instance &instance, const char *format,
 	argument_types.quick_push (argument_type);
     }
   gcc_assert (format[0] == 0);
+  if (instance.fpm_mode == FPM_set)
+    argument_types.quick_push (get_typenode_from_name (UINT64_TYPE));
   return return_type;
 }
 
@@ -4316,6 +4318,44 @@ struct unary_convert_narrowt_def : public overloaded_base<1>
 };
 SHAPE (unary_convert_narrowt)
 
+/* sv<t0>x<g0>_t svfoo_t0[_t1_g]_fpm(sv<t0>x<g0>_t, sv<t1>x<g1>_t, fpm_t)
+
+   Similar to unary_convert_narrowt but for tuple arguments with support for
+   modal floating point.  */
+struct unary_convert_narrowxn_fpm_def : public overloaded_base<1>
+{
+  bool
+  explicit_group_suffix_p () const override
+  {
+    return false;
+  }
+
+  bool
+  has_merge_argument_p (const function_instance &, unsigned int) const override
+  {
+    return true;
+  }
+
+  void
+  build (function_builder &b, const function_group_info &group) const override
+  {
+    b.add_overloaded_functions (group, MODE_none);
+    build_all (b, "v0,v0,t1", group, MODE_none);
+  }
+  
+  tree
+  resolve (function_resolver &r) const override
+  {
+    sve_type type;
+    if (!r.check_num_arguments (3) || !(type = r.infer_sve_type (1))
+	|| !r.require_derived_scalar_type (2, TYPE_unsigned, 64))
+      return error_mark_node;
+
+    return r.resolve_to (r.mode_suffix_id, type);
+  }
+};
+SHAPE (unary_convert_narrowxn_fpm)
+
 /* sv<t0>x<g0>_t svfoo_t0[_t1_g](sv<t1>x<g1>_t)
 
    where the target type <t0> must be specified explicitly but the
@@ -4348,6 +4388,40 @@ struct unary_convertxn_def : public unary_convert_def
 };
 SHAPE (unary_convertxn)
 
+/* sv<t0>x<g0>_t svfoo_t0[_t1_g]_fpm(sv<t1>x<g1>_t, fpm_t)
+
+   where the target type <t0> must be specified explicitly but the
+   source type <t1> can be inferred.
+
+   Functions with a group suffix are unpredicated. */
+struct unary_convertxn_fpm_def : public unary_convert_def
+{
+  bool explicit_group_suffix_p () const override { return false; }
+
+  void
+  build (function_builder &b, const function_group_info &group) const override
+  {
+    b.add_overloaded_functions (group, MODE_none);
+    build_all (b, "v0,t1", group, MODE_none);
+  }
+
+  tree
+  resolve (function_resolver &r) const override
+  {
+    if (r.pred != PRED_none)
+      return unary_convert_def::resolve (r);
+
+    sve_type type;
+    if (!r.check_num_arguments (2)
+	|| !(type = r.infer_sve_type (0))
+	|| !r.require_derived_scalar_type (1, TYPE_unsigned, 64))
+      return error_mark_node;
+
+    return r.resolve_to (r.mode_suffix_id, type);
+  }
+};
+SHAPE (unary_convertxn_fpm)
+
 /* sv<t0>_t svfoo[_t0](sv<t0:half>_t).  */
 struct unary_long_def : public overloaded_base<0>
 {
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
index ea87240518d..ddb1f720c65 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
@@ -217,7 +217,9 @@ namespace aarch64_sve
     extern const function_shape *const unary;
     extern const function_shape *const unary_convert;
     extern const function_shape *const unary_convert_narrowt;
+    extern const function_shape *const unary_convert_narrowxn_fpm;
     extern const function_shape *const unary_convertxn;
+    extern const function_shape *const unary_convertxn_fpm;
     extern const function_shape *const unary_long;
     extern const function_shape *const unary_n;
     extern const function_shape *const unary_narrowb;
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
index d09b75b60c2..28a5b60d4a2 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
@@ -174,13 +174,34 @@ public:
   }
 };
 
+class svcvt_fp8_impl : public function_base
+{
+public:
+  CONSTEXPR
+  svcvt_fp8_impl (int unspec) : m_unspec (unspec) {}
+
+  rtx
+  expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (code_for_aarch64_sve2_fp8_cvt (m_unspec,
+							    e.result_mode ()));
+  }
+
+  int m_unspec;
+};
+
 class svcvtn_impl : public function_base
 {
 public:
   rtx
   expand (function_expander &e) const override
   {
-    return e.use_exact_insn (code_for_aarch64_sve_cvtn (e.result_mode ()));
+    if (e.fpm_mode == FPM_set)
+      return e.use_exact_insn
+	(code_for_aarch64_sve2_fp8_cvtn
+	 (GET_MODE (e.args[0])));
+    else
+      return e.use_exact_insn (code_for_aarch64_sve_cvtn (e.result_mode ()));
   }
 };
 
@@ -633,8 +654,13 @@ FUNCTION (svbsl2n, CODE_FOR_MODE0 (aarch64_sve2_bsl2n),)
 FUNCTION (svcdot, svcdot_impl,)
 FUNCTION (svcdot_lane, svcdot_lane_impl,)
 FUNCTION (svclamp, svclamp_impl,)
+FUNCTION (svcvt1, svcvt_fp8_impl, (UNSPEC_F1CVT))
+FUNCTION (svcvt2, svcvt_fp8_impl, (UNSPEC_F2CVT))
+FUNCTION (svcvtlt1, svcvt_fp8_impl, (UNSPEC_F1CVTLT))
+FUNCTION (svcvtlt2, svcvt_fp8_impl, (UNSPEC_F2CVTLT))
 FUNCTION (svcvtlt, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTLT))
 FUNCTION (svcvtn, svcvtn_impl,)
+FUNCTION (svcvtnb, fixed_insn_function, (CODE_FOR_aarch64_sve2_fp8_cvtnb_vnx8sf))
 FUNCTION (svcvtx, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTX))
 FUNCTION (svcvtxnt, CODE_FOR_MODE1 (aarch64_sve2_cvtxnt),)
 FUNCTION (sveor3, CODE_FOR_MODE0 (aarch64_sve2_eor3),)
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
index 8d25bb33dad..f846ebe79ce 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
@@ -302,3 +302,15 @@ DEF_SVE_FUNCTION_GS (svzipq, unaryxn, all_data, x24, none, unused)
 DEF_SVE_FUNCTION (svamax, binary_opt_single_n, all_float, mxz)
 DEF_SVE_FUNCTION (svamin, binary_opt_single_n, all_float, mxz)
 #undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS \
+  sve_and_sme (AARCH64_FL_SVE2 | AARCH64_FL_FP8, \
+	       AARCH64_FL_SME2 | AARCH64_FL_FP8)
+DEF_SVE_FUNCTION_GS (svcvt1, unary_convert, cvt_mf8, none, none, set)
+DEF_SVE_FUNCTION_GS (svcvt2, unary_convert, cvt_mf8, none, none, set)
+DEF_SVE_FUNCTION_GS (svcvtlt1, unary_convert, cvt_mf8, none, none, set)
+DEF_SVE_FUNCTION_GS (svcvtlt2, unary_convert, cvt_mf8, none, none, set)
+DEF_SVE_FUNCTION_GS (svcvtn, unary_convertxn_fpm, cvtn_mf8, x2, none, set)
+DEF_SVE_FUNCTION_GS (svcvtnb, unary_convertxn_fpm, cvtnx_mf8, x2, none, set)
+DEF_SVE_FUNCTION_GS (svcvtnt, unary_convert_narrowxn_fpm, cvtnx_mf8, x2, none, set)
+#undef REQUIRED_EXTENSIONS
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.h b/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
index 013a9dfc5fa..f1584e9dcfc 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
@@ -59,8 +59,14 @@ namespace aarch64_sve
     extern const function_base *const svcdot_lane;
     extern const function_base *const svclamp;
     extern const function_base *const svcntp;
+    extern const function_base *const svcvt1;
+    extern const function_base *const svcvt2;
+    extern const function_base *const svcvtlt1;
+    extern const function_base *const svcvtlt2;
     extern const function_base *const svcvtlt;
     extern const function_base *const svcvtn;
+    extern const function_base *const svcvtnb;
+    extern const function_base *const svcvtnt;
     extern const function_base *const svcvtx;
     extern const function_base *const svcvtxnt;
     extern const function_base *const sveor3;
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc
index dfe6221e5c2..fe2d4986051 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc
@@ -453,6 +453,20 @@ CONSTEXPR const group_suffix_info group_suffixes[] = {
   D (f32, s32), \
   D (f32, u32)
 
+/* _f16_mf8
+   _bf16_mf8.  */
+#define TYPES_cvt_mf8(S, D) \
+  D (f16, mf8), D (bf16, mf8)
+
+/* _mf8_f16
+   _mf8_bf16.  */
+#define TYPES_cvtn_mf8(S, D) \
+  D (mf8, f16), D (mf8, bf16)
+
+/* _mf8_f32.  */
+#define TYPES_cvtnx_mf8(S, D) \
+  D (mf8, f32)
+
 /* { _s32 _s64 } x { _b8 _b16 _b32 _b64 }
    { _u32 _u64 }.  */
 #define TYPES_inc_dec_n1(D, A) \
@@ -752,9 +766,12 @@ DEF_SVE_TYPES_ARRAY (cvt);
 DEF_SVE_TYPES_ARRAY (cvt_bfloat);
 DEF_SVE_TYPES_ARRAY (cvt_h_s_float);
 DEF_SVE_TYPES_ARRAY (cvt_long);
+DEF_SVE_TYPES_ARRAY (cvt_mf8);
 DEF_SVE_TYPES_ARRAY (cvt_narrow_s);
 DEF_SVE_TYPES_ARRAY (cvt_narrow);
 DEF_SVE_TYPES_ARRAY (cvt_s_s);
+DEF_SVE_TYPES_ARRAY (cvtn_mf8);
+DEF_SVE_TYPES_ARRAY (cvtnx_mf8);
 DEF_SVE_TYPES_ARRAY (inc_dec_n);
 DEF_SVE_TYPES_ARRAY (qcvt_x2);
 DEF_SVE_TYPES_ARRAY (qcvt_x4);
@@ -1370,6 +1387,8 @@ function_builder::get_name (const function_instance &instance,
   if (!overloaded_p || instance.shape->explicit_group_suffix_p ())
     append_name (instance.group_suffix ().string);
   append_name (pred_suffixes[instance.pred]);
+  if (instance.fpm_mode == FPM_set)
+    append_name ("_fpm");
   return finish_name ();
 }
 
@@ -2992,11 +3011,12 @@ function_resolver::check_gp_argument (unsigned int nops,
 {
   gcc_assert (pred != PRED_za_m);
   i = 0;
+  unsigned int nfpm_args = (fpm_mode == FPM_set) ? 1 : 0;
   if (pred != PRED_none)
     {
       /* Unary merge operations should use resolve_unary instead.  */
       gcc_assert (!shape->has_merge_argument_p (*this, nops));
-      nargs = nops + 1;
+      nargs = nops + nfpm_args + 1;
       if (!check_num_arguments (nargs)
 	  || !require_vector_type (i, gp_type_index ()))
 	return false;
@@ -3004,7 +3024,7 @@ function_resolver::check_gp_argument (unsigned int nops,
     }
   else
     {
-      nargs = nops;
+      nargs = nops + nfpm_args;
       if (!check_num_arguments (nargs))
 	return false;
     }
@@ -4124,6 +4144,12 @@ function_expander::use_exact_insn (insn_code icode)
     }
   for (unsigned int i = 0; i < nops; ++i)
     add_input_operand (icode, args[i]);
+  if (fpm_mode == FPM_set)
+    {
+      /* The last argument of these functions is always an fpm_t.  */
+      gcc_assert (GET_MODE (args.last ()) == DImode);
+      emit_move_insn (gen_rtx_REG (DImode, FPM_REGNUM), args.last ());
+    }
   return generate_insn (icode);
 }
 
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index ac27124fb74..629523e7a45 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -2676,6 +2676,14 @@ (define_insn "@aarch64_<optab>_lane_<mode>"
 ;; ---- [FP<-FP] Widening conversions
 ;; -------------------------------------------------------------------------
 ;; Includes:
+;; - BF1CVT
+;; - BF1CVTLT
+;; - BF2CVT
+;; - BF2CVTLT
+;; - F1CVT
+;; - F1CVTLT
+;; - F2CVT
+;; - F2CVTLT
 ;; - FCVTLT
 ;; -------------------------------------------------------------------------
 
@@ -2741,6 +2749,17 @@ (define_insn "*cond_<sve_fp_op><mode>_strict"
   "<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Ventype>"
 )
 
+(define_insn "@aarch64_sve2_fp8_cvt_<fp8_cvt_uns_op><SVE_FULL_HF:mode>"
+  [(set (match_operand:SVE_FULL_HF 0 "register_operand" "=w")
+	(unspec:SVE_FULL_HF
+	  [(match_operand:VNx16QI_ONLY 1 "register_operand" "w")
+	  (reg:DI FPM_REGNUM)]
+	  FP8CVT_UNS)
+	  )]
+  "TARGET_SSVE_FP8"
+  "<b><fp8_cvt_uns_op>\t%0.h, %1.b"
+)
+
 ;; -------------------------------------------------------------------------
 ;; ---- [FP<-FP] Narrowing conversions
 ;; -------------------------------------------------------------------------
@@ -2865,6 +2884,8 @@ (define_insn "@aarch64_sve2_cvtxnt<mode>"
 ;; - BFCVTN
 ;; - FCVT
 ;; - FCVTN
+;; - FCVTNB
+;; - FCVTNT
 ;; -------------------------------------------------------------------------
 
 (define_insn "truncvnx8sf<mode>2"
@@ -2884,6 +2905,37 @@ (define_insn "@aarch64_sve_cvtn<mode>"
   "<b>fcvtn\t%0.h, %1"
 )
 
+(define_insn "@aarch64_sve2_fp8_cvtn_<SVE_FULL_HFx2:mode>"
+  [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
+	(unspec:VNx16QI_ONLY
+	  [(match_operand:SVE_FULL_HFx2 1 "aligned_register_operand" "Uw2")
+	   (reg:DI FPM_REGNUM)]
+	  UNSPEC_FP8FCVTN))]
+  "TARGET_SSVE_FP8"
+  "<b>fcvtn\t%0.b, %1"
+)
+
+(define_insn "@aarch64_sve2_fp8_cvtnb_<VNx8SF_ONLY:mode>"
+  [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
+	(unspec:VNx16QI_ONLY
+	  [(match_operand:VNx8SF_ONLY 1 "aligned_register_operand" "Uw2")
+	   (reg:DI FPM_REGNUM)]
+	  UNSPEC_FCVTNB))]
+  "TARGET_SSVE_FP8"
+  "fcvtnb\t%0.b, %1"
+)
+
+(define_insn "@aarch64_sve_cvtnt<mode>"
+  [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
+	(unspec:VNx16QI_ONLY
+	  [(match_operand:VNx16QI_ONLY 1 "register_operand" "0")
+	   (match_operand:VNx8SF 2 "aligned_register_operand" "Uw2")
+	   (reg:DI FPM_REGNUM)]
+	  UNSPEC_FCVTNT))]
+  "TARGET_SSVE_FP8"
+  "fcvtnt\t%0.b, %2"
+)
+
 ;; -------------------------------------------------------------------------
 ;; ---- [FP<-INT] Multi-vector conversions
 ;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 593319fd472..7e1aeb74ea1 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -483,6 +483,11 @@ constexpr auto AARCH64_FL_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
 /* fp8 instructions are enabled through +fp8.  */
 #define TARGET_FP8 AARCH64_HAVE_ISA (FP8)
 
+/* Some fp8 instructions require +fp8 and one of +sve2 or +sme2.  */
+#define TARGET_SSVE_FP8 (TARGET_FP8 \
+			 && (TARGET_SVE2 || TARGET_STREAMING) \
+			 && (TARGET_SME2 || TARGET_NON_STREAMING))
+
 /* Standard register usage.  */
 
 /* 31 64-bit general purpose registers R0-R30:
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 8269b0cdcd9..f9f16676219 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -432,6 +432,7 @@ (define_mode_iterator VNx16SI_ONLY [VNx16SI])
 (define_mode_iterator VNx8HI_ONLY [VNx8HI])
 (define_mode_iterator VNx8BF_ONLY [VNx8BF])
 (define_mode_iterator VNx8SI_ONLY [VNx8SI])
+(define_mode_iterator VNx8SF_ONLY [VNx8SF])
 (define_mode_iterator VNx8DI_ONLY [VNx8DI])
 (define_mode_iterator VNx4SI_ONLY [VNx4SI])
 (define_mode_iterator VNx4SF_ONLY [VNx4SF])
@@ -465,6 +466,9 @@ (define_mode_iterator SVE_FULL_BHSIx2 [VNx32QI VNx16HI VNx8SI])
 ;; Fully-packed SVE vector modes that have 16-bit float elements.
 (define_mode_iterator SVE_FULL_HF [VNx8BF VNx8HF])
 
+;; Pairs of the above.
+(define_mode_iterator SVE_FULL_HFx2 [VNx16BF VNx16HF])
+
 ;; Fully-packed SVE vector modes that have 16-bit, 32-bit or 64-bit elements.
 (define_mode_iterator SVE_FULL_HSD [VNx8HI VNx4SI VNx2DI
 				    VNx8BF VNx8HF VNx4SF VNx2DF])
@@ -937,7 +941,13 @@ (define_c_enum "unspec"
     UNSPEC_COND_FLOGB	; Used in aarch64-sve2.md.
     UNSPEC_EORBT	; Used in aarch64-sve2.md.
     UNSPEC_EORTB	; Used in aarch64-sve2.md.
+    UNSPEC_F1CVT	; Used in aarch64-sve2.md.
+    UNSPEC_F1CVTLT	; Used in aarch64-sve2.md.
+    UNSPEC_F2CVT	; Used in aarch64-sve2.md.
+    UNSPEC_F2CVTLT	; Used in aarch64-sve2.md.
     UNSPEC_FADDP	; Used in aarch64-sve2.md.
+    UNSPEC_FCVTNB	; Used in aarch64-sve2.md.
+    UNSPEC_FCVTNT	; Used in aarch64-sve2.md.
     UNSPEC_FMAXNMP	; Used in aarch64-sve2.md.
     UNSPEC_FMAXP	; Used in aarch64-sve2.md.
     UNSPEC_FMINNMP	; Used in aarch64-sve2.md.
@@ -946,6 +956,7 @@ (define_c_enum "unspec"
     UNSPEC_FMLALT	; Used in aarch64-sve2.md.
     UNSPEC_FMLSLB	; Used in aarch64-sve2.md.
     UNSPEC_FMLSLT	; Used in aarch64-sve2.md.
+    UNSPEC_FP8FCVTN	; Used in aarch64-sve2.md.
     UNSPEC_HISTCNT	; Used in aarch64-sve2.md.
     UNSPEC_HISTSEG	; Used in aarch64-sve2.md.
     UNSPEC_LD1_COUNT	; Used in aarch64-sve2.md.
@@ -4534,3 +4545,17 @@ (define_int_attr faminmax_uns_op
 
 (define_code_attr faminmax_op
   [(smax "famax") (smin "famin")])
+
+;; Iterators and attributes for fp8 sve/sme conversions
+
+(define_int_iterator FP8CVT_UNS
+  [UNSPEC_F1CVT
+   UNSPEC_F2CVT
+   UNSPEC_F1CVTLT
+   UNSPEC_F2CVTLT])
+
+(define_int_attr fp8_cvt_uns_op
+  [(UNSPEC_F1CVT "f1cvt")
+   (UNSPEC_F2CVT "f2cvt")
+   (UNSPEC_F1CVTLT "f1cvtlt")
+   (UNSPEC_F2CVTLT "f2cvtlt")])
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
index 367024be863..8048c488a28 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
@@ -75,7 +75,7 @@
 #define TEST_DUAL_Z(NAME, TYPE1, TYPE2, CODE1, CODE2)		\
   PROTO (NAME, TYPE1, (TYPE1 z0, TYPE1 z1, TYPE1 z2, TYPE1 z3,	\
 		       TYPE2 z4, TYPE2 z5, TYPE2 z6, TYPE2 z7,	\
-		       svbool_t p0, svbool_t p1))		\
+		       svbool_t p0, svbool_t p1, fpm_t fpm0))	\
   {								\
     INVOKE (CODE1, CODE2);					\
     return z0;							\
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c
new file mode 100644
index 00000000000..ab97eef3472
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c
@@ -0,0 +1,38 @@
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2+fp8"
+
+void
+test (svmfloat8_t f8, svfloat32x2_t f32x2, fpm_t fpm0,
+      svfloat16x2_t f16x2, svfloat16x4_t f16x4,
+      svfloat32x3_t f32x3, svfloat32x4_t f32x4,
+      svbool_t pg, float f, svint8_t s8, svint32x2_t s32x2)
+  __arm_streaming
+{
+  svcvtnt_mf8_fpm (f8, f32x2, fpm0);
+
+  svcvtnt_mf8_fpm (); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  
+  svcvtnt_mf8_fpm (f8); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (f32x2); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (fpm0); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (f); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (f8, f32x2); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (f32x2, fpm0); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (f8, fpm0); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (pg); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+  svcvtnt_mf8_fpm (s8); /* { dg-error {too few arguments to function 'svcvtnt_mf8_fpm'} } */
+
+  svcvtnt_mf8_fpm (f8, f16x2, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svfloat16x2_t' arguments} } */
+  svcvtnt_mf8_fpm (f8, f16x4, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svfloat16x4_t' arguments} } */
+  svcvtnt_mf8_fpm (f8, f32x3, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svfloat32x3_t' arguments} } */
+  svcvtnt_mf8_fpm (f8, f32x4, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svfloat32x4_t' arguments} } */
+
+  svcvtnt_mf8_fpm (f8, 0, fpm0); /* { dg-error {passing 'int' to argument 2 of 'svcvtnt_mf8_fpm', which expects an SVE type rather than a scalar type} } */
+  svcvtnt_mf8_fpm (f8, f, fpm0); /* { dg-error {passing 'float' to argument 2 of 'svcvtnt_mf8_fpm', which expects an SVE type rather than a scalar type} } */
+  svcvtnt_mf8_fpm (f8, pg, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svbool_t' arguments} } */
+  svcvtnt_mf8_fpm (f8, s8, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svint8_t' arguments} } */
+  svcvtnt_mf8_fpm (f8, s32x2, fpm0); /* { dg-error {'svcvtnt_mf8_fpm' has no form that takes 'svint32x2_t' arguments} } */
+  
+  svcvtnt_mf8_fpm (f8, f32x2, f32x2); /* { dg-error {passing 'svfloat32x2_t' to argument 3 of 'svcvtnt_mf8_fpm', which expects 'uint64_t'} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c
new file mode 100644
index 00000000000..d312e857d81
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c
@@ -0,0 +1,60 @@
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2+fp8"
+
+void
+test (svfloat16x2_t f16x2, svbfloat16x2_t bf16x2, svfloat32x2_t f32x2,
+      svfloat16x3_t f16x3, svfloat16x4_t f16x4,
+      svfloat32x3_t f32x3, svfloat32x4_t f32x4,
+      fpm_t fpm0,
+      svbool_t pg, float f, svint8_t s8, svint32x2_t s32x2)
+  __arm_streaming
+{
+  svcvtn_mf8_fpm (f16x2, fpm0);
+  svcvtn_mf8_fpm (bf16x2, fpm0);
+
+  svcvtn_mf8_fpm (); /* { dg-error {too few arguments to function 'svcvtn_mf8_fpm'} } */
+  
+  svcvtn_mf8_fpm (f16x2); /* { dg-error {too few arguments to function 'svcvtn_mf8_fpm'} } */
+  svcvtn_mf8_fpm (fpm0); /* { dg-error {too few arguments to function 'svcvtn_mf8_fpm'} } */
+  
+  svcvtn_mf8_fpm (f); /* { dg-error {too few arguments to function 'svcvtn_mf8_fpm'} } */
+  svcvtn_mf8_fpm (pg); /* { dg-error {too few arguments to function 'svcvtn_mf8_fpm'} } */
+  svcvtn_mf8_fpm (s8); /* { dg-error {too few arguments to function 'svcvtn_mf8_fpm'} } */
+
+  svcvtn_mf8_fpm (f16x2, f16x2, fpm0); /* { dg-error {too many arguments to function 'svcvtn_mf8_fpm'} } */
+
+  svcvtn_mf8_fpm (f16x3, fpm0); /* { dg-error {'svcvtn_mf8_fpm' has no form that takes 'svfloat16x3_t' arguments} } */
+  svcvtn_mf8_fpm (f16x4, fpm0); /* { dg-error {'svcvtn_mf8_fpm' has no form that takes 'svfloat16x4_t' arguments} } */
+  svcvtn_mf8_fpm (0, fpm0); /* { dg-error {passing 'int' to argument 1 of 'svcvtn_mf8_fpm', which expects an SVE type rather than a scalar type} } */
+  svcvtn_mf8_fpm (f, fpm0); /* { dg-error {passing 'float' to argument 1 of 'svcvtn_mf8_fpm', which expects an SVE type rather than a scalar type} } */
+  svcvtn_mf8_fpm (pg, fpm0); /* { dg-error {'svcvtn_mf8_fpm' has no form that takes 'svbool_t' arguments} } */
+  svcvtn_mf8_fpm (s8, fpm0); /* { dg-error {'svcvtn_mf8_fpm' has no form that takes 'svint8_t' arguments} } */
+  svcvtn_mf8_fpm (s32x2, fpm0); /* { dg-error {'svcvtn_mf8_fpm' has no form that takes 'svint32x2_t' arguments} } */
+  
+  svcvtn_mf8_fpm (f16x2, f16x2); /* { dg-error {passing 'svfloat16x2_t' to argument 2 of 'svcvtn_mf8_fpm', which expects 'uint64_t'} } */
+
+
+  svcvtnb_mf8_fpm (f32x2, fpm0);
+
+  svcvtnb_mf8_fpm (); /* { dg-error {too few arguments to function 'svcvtnb_mf8_fpm'} } */
+  
+  svcvtnb_mf8_fpm (f32x2); /* { dg-error {too few arguments to function 'svcvtnb_mf8_fpm'} } */
+  svcvtnb_mf8_fpm (fpm0); /* { dg-error {too few arguments to function 'svcvtnb_mf8_fpm'} } */
+  
+  svcvtnb_mf8_fpm (f); /* { dg-error {too few arguments to function 'svcvtnb_mf8_fpm'} } */
+  svcvtnb_mf8_fpm (pg); /* { dg-error {too few arguments to function 'svcvtnb_mf8_fpm'} } */
+  svcvtnb_mf8_fpm (s8); /* { dg-error {too few arguments to function 'svcvtnb_mf8_fpm'} } */
+
+  svcvtnb_mf8_fpm (f32x2, f32x2, fpm0); /* { dg-error {too many arguments to function 'svcvtnb_mf8_fpm'} } */
+
+  svcvtnb_mf8_fpm (f32x3, fpm0); /* { dg-error {'svcvtnb_mf8_fpm' has no form that takes 'svfloat32x3_t' arguments} } */
+  svcvtnb_mf8_fpm (f32x4, fpm0); /* { dg-error {'svcvtnb_mf8_fpm' has no form that takes 'svfloat32x4_t' arguments} } */
+  svcvtnb_mf8_fpm (0, fpm0); /* { dg-error {passing 'int' to argument 1 of 'svcvtnb_mf8_fpm', which expects an SVE type rather than a scalar type} } */
+  svcvtnb_mf8_fpm (f, fpm0); /* { dg-error {passing 'float' to argument 1 of 'svcvtnb_mf8_fpm', which expects an SVE type rather than a scalar type} } */
+  svcvtnb_mf8_fpm (pg, fpm0); /* { dg-error {'svcvtnb_mf8_fpm' has no form that takes 'svbool_t' arguments} } */
+  svcvtnb_mf8_fpm (s8, fpm0); /* { dg-error {'svcvtnb_mf8_fpm' has no form that takes 'svint8_t' arguments} } */
+  svcvtnb_mf8_fpm (s32x2, fpm0); /* { dg-error {'svcvtnb_mf8_fpm' has no form that takes 'svint32x2_t' arguments} } */
+  
+  svcvtnb_mf8_fpm (f32x2, f32x2); /* { dg-error {passing 'svfloat32x2_t' to argument 2 of 'svcvtnb_mf8_fpm', which expects 'uint64_t'} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c
new file mode 100644
index 00000000000..b6b616bb9b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c
@@ -0,0 +1,48 @@
+/* { dg-additional-options "-march=armv8.5-a+sve2+bf16+fp8" } */
+/* { dg-require-effective-target aarch64_asm_fp8_ok }  */
+/* { dg-require-effective-target aarch64_asm_bf16_ok }  */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** cvt1_f16_mf8_fpm:
+**	msr	fpmr, x0
+**	f1cvt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvt1_f16_mf8_fpm, svfloat16_t, svmfloat8_t,
+	     z0 = svcvt1_f16_mf8_fpm (z4, fpm0),
+	     z0 = svcvt1_f16_fpm (z4, fpm0))
+
+/*
+** cvt1_bf16_mf8_fpm:
+**	msr	fpmr, x0
+**	bf1cvt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvt1_bf16_mf8_fpm, svbfloat16_t, svmfloat8_t,
+	     z0 = svcvt1_bf16_mf8_fpm (z4, fpm0),
+	     z0 = svcvt1_bf16_fpm (z4, fpm0))
+
+/*
+** cvt2_f16_mf8_fpm:
+**	msr	fpmr, x0
+**	f2cvt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvt2_f16_mf8_fpm, svfloat16_t, svmfloat8_t,
+	     z0 = svcvt2_f16_mf8_fpm (z4, fpm0),
+	     z0 = svcvt2_f16_fpm (z4, fpm0))
+
+/*
+** cvt2_bf16_mf8_fpm:
+**	msr	fpmr, x0
+**	bf2cvt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvt2_bf16_mf8_fpm, svbfloat16_t, svmfloat8_t,
+	     z0 = svcvt2_bf16_mf8_fpm (z4, fpm0),
+	     z0 = svcvt2_bf16_fpm (z4, fpm0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c
new file mode 100644
index 00000000000..4ff68c53e22
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c
@@ -0,0 +1,47 @@
+/* { dg-additional-options "-march=armv8.5-a+sve2+bf16+fp8" } */
+/* { dg-require-effective-target aarch64_asm_fp8_ok }  */
+/* { dg-require-effective-target aarch64_asm_bf16_ok }  */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+/*
+** cvtlt1_f16_mf8_fpm:
+**	msr	fpmr, x0
+**	f1cvtlt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvtlt1_f16_mf8_fpm, svfloat16_t, svmfloat8_t,
+	     z0 = svcvtlt1_f16_mf8_fpm (z4, fpm0),
+	     z0 = svcvtlt1_f16_fpm (z4, fpm0))
+
+/*
+** cvtlt1_bf16_mf8_fpm:
+**	msr	fpmr, x0
+**	bf1cvtlt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvtlt1_bf16_mf8_fpm, svbfloat16_t, svmfloat8_t,
+	     z0 = svcvtlt1_bf16_mf8_fpm (z4, fpm0),
+	     z0 = svcvtlt1_bf16_fpm (z4, fpm0))
+
+/*
+** cvtlt2_f16_mf8_fpm:
+**	msr	fpmr, x0
+**	f2cvtlt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvtlt2_f16_mf8_fpm, svfloat16_t, svmfloat8_t,
+	     z0 = svcvtlt2_f16_mf8_fpm (z4, fpm0),
+	     z0 = svcvtlt2_f16_fpm (z4, fpm0))
+
+/*
+** cvtlt2_bf16_mf8_fpm:
+**	msr	fpmr, x0
+**	bf2cvtlt	z0\.h, z4\.b
+**	ret
+*/
+TEST_DUAL_Z (cvtlt2_bf16_mf8_fpm, svbfloat16_t, svmfloat8_t,
+	     z0 = svcvtlt2_bf16_mf8_fpm (z4, fpm0),
+	     z0 = svcvtlt2_bf16_fpm (z4, fpm0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c
new file mode 100644
index 00000000000..82188f8ca9a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c
@@ -0,0 +1,59 @@
+/* { dg-additional-options "-march=armv8.5-a+sve2+bf16+fp8" } */
+/* { dg-require-effective-target aarch64_asm_fp8_ok }  */
+/* { dg-require-effective-target aarch64_asm_bf16_ok }  */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** cvtn_mf8_f16_x2_fpm:
+**	msr	fpmr, x2
+**	fcvtn	z0\.b, {z4\.h(?:, | - )z5\.h}
+**	ret
+*/
+TEST_DUAL_Z (cvtn_mf8_f16_x2_fpm, svmfloat8_t, svfloat16x2_t,
+	   z0 = svcvtn_mf8_f16_x2_fpm (z4, fpm0),
+	   z0 = svcvtn_mf8_fpm (z4, fpm0))
+
+/*
+** cvtn_mf8_bf16_x2_fpm:
+**	msr	fpmr, x2
+**	bfcvtn	z0\.b, {z4\.h(?:, | - )z5\.h}
+**	ret
+*/
+TEST_DUAL_Z (cvtn_mf8_bf16_x2_fpm, svmfloat8_t, svbfloat16x2_t,
+	   z0 = svcvtn_mf8_bf16_x2_fpm (z4, fpm0),
+	   z0 = svcvtn_mf8_fpm (z4, fpm0))
+
+/*
+** cvtnb_mf8_f32_x2_fpm:
+**	msr	fpmr, x2
+**	fcvtnb	z0\.b, {z4\.s(?:, | - )z5\.s}
+**	ret
+*/
+TEST_DUAL_Z (cvtnb_mf8_f32_x2_fpm, svmfloat8_t, svfloat32x2_t,
+	   z0 = svcvtnb_mf8_f32_x2_fpm (z4, fpm0),
+	   z0 = svcvtnb_mf8_fpm (z4, fpm0))
+
+/*
+** cvtnt_mf8_f32_x2_fpm_untied:
+**	msr	fpmr, x2
+**	fcvtnt	z1\.b, {z4\.s(?:, | - )z5\.s}
+**	mov	z0.d, z1.d
+**	ret
+*/
+TEST_DUAL_Z (cvtnt_mf8_f32_x2_fpm_untied, svmfloat8_t, svfloat32x2_t,
+	   z0 = svcvtnt_mf8_f32_x2_fpm (z1, z4, fpm0),
+	   z0 = svcvtnt_mf8_fpm (z1, z4, fpm0))
+
+/*
+** cvtnt_mf8_f32_x2_fpm_tied:
+**	msr	fpmr, x2
+**	fcvtnt	z0\.b, {z4\.s(?:, | - )z5\.s}
+**	ret
+*/
+TEST_DUAL_Z (cvtnt_mf8_f32_x2_fpm_tied, svmfloat8_t, svfloat32x2_t,
+	   z0 = svcvtnt_mf8_f32_x2_fpm (z0, z4, fpm0),
+	   z0 = svcvtnt_mf8_fpm (z0, z4, fpm0))
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 0c2fd83f45c..ec4e6be1a12 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -12121,7 +12121,7 @@ proc check_effective_target_aarch64_tiny { } {
 
 foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
 			  "i8mm" "f32mm" "f64mm" "bf16" "sb" "sve2" "ls64"
-			  "sme" "sme-i16i64" "sme2" } {
+			  "sme" "sme-i16i64" "sme2" "fp8" } {
     eval [string map [list FUNC $aarch64_ext] {
 	proc check_effective_target_aarch64_asm_FUNC_ok { } {
 	  if { [istarget aarch64*-*-*] } {
