On 10/9/2024 5:26 PM, Richard Sandiford wrote:
<saurabh....@arm.com> writes:
+/*
+** amax_0_f16_m_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)

Sorry to ask for another round, but: the "(" and ")" aren't needed.
They're used when something later in the regular expression sequence
needs to refer back to something earlier, such as in:

**      mov     (z[0-9]+\.h), h4
**      movprfx z0, z1
**      famax   z0\.h, p0/m, z0\.h, \1

Here, the "\1" refers back to the "(...)", i.e. the last operand of the
famax has to be the destination of the mov.

But in amax_0_f16_m_tied1 above, we're not matching the constant move
that sets the register to all zeros.  We're just matching a single instruction
and are taking it on faith that the "z[0-9]+\.h" operand does in fact hold
zeros.  So it should just be:

**      famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h

Same for the other tests that have "(...)" in their final line.
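
For anyone reading along who hasn't written these check-function-bodies
patterns: here is a minimal standalone sketch of the same idea in plain
Python re (not the GCC harness; the register names and assembly text below
are made up for the example).  The parentheses only earn their keep when a
later line refers back to the capture with \1.

import re

# Hypothetical assembly text, loosely modelled on the patterns above.
asm = "mov\tz5.h, h4\nmovprfx\tz0, z1\nfamax\tz0.h, p0/m, z0.h, z5.h"

# Capture group plus \1: the final famax operand must be whatever register
# the mov wrote to, so the "(...)" is doing real work here.
with_backref = (r"mov\t(z[0-9]+\.h), h4\n"
                r"movprfx\tz0, z1\n"
                r"famax\tz0\.h, p0/m, z0\.h, \1")
print(bool(re.fullmatch(with_backref, asm)))   # True

# No later reference: a bare z[0-9]+\.h is enough, and wrapping it in
# parentheses would only create an unused capture group.
plain = r"famax\tz0\.h, p0/m, z0\.h, z[0-9]+\.h"
print(bool(re.search(plain, asm)))             # True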

No worries, thank you again for the review! I'll send a new version.

Thanks,
Richard


+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_m_tied1, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z0, 0),
+               z0 = svamax_m (p0, z0, 0))
+
+/*
+** amax_0_f16_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_m_untied, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z1, 0),
+               z0 = svamax_m (p0, z1, 0))
+
+/*
+** amax_1_f16_m_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_m_tied1, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z0, 1),
+               z0 = svamax_m (p0, z0, 1))
+
+/*
+** amax_1_f16_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_m_untied, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z1, 1),
+               z0 = svamax_m (p0, z1, 1))
+
+/*
+** amax_2_f16_m:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_m, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z0, 2),
+               z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f16_z_tied1:
+**     movprfx z0\.h, p0/z, z0\.h
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_tied1, svfloat16_t,
+               z0 = svamax_f16_z (p0, z0, z1),
+               z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f16_z_tied2:
+**     movprfx z0\.h, p0/z, z0\.h
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_tied2, svfloat16_t,
+               z0 = svamax_f16_z (p0, z1, z0),
+               z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f16_z_untied:
+** (
+**     movprfx z0\.h, p0/z, z1\.h
+**     famax   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     movprfx z0\.h, p0/z, z2\.h
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_untied, svfloat16_t,
+               z0 = svamax_f16_z (p0, z1, z2),
+               z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_h4_f16_z_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     movprfx z0\.h, p0/z, z0\.h
+**     famax   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_z_tied1, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_z (p0, z0, d4),
+                z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_h4_f16_z_untied:
+**     mov     (z[0-9]+\.h), h4
+** (
+**     movprfx z0\.h, p0/z, z1\.h
+**     famax   z0\.h, p0/m, z0\.h, \1
+** |
+**     movprfx z0\.h, p0/z, \1
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_z_untied, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_z (p0, z1, d4),
+                z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_0_f16_z_tied1:
+**     ...
+**     movprfx z0, z31
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_z_tied1, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z0, 0),
+               z0 = svamax_z (p0, z0, 0))
+
+/*
+** amax_0_f16_z_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_z_untied, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z1, 0),
+               z0 = svamax_z (p0, z1, 0))
+
+/*
+** amax_1_f16_z_tied1:
+**     ...
+**     movprfx z0\.h, p0/z, z0\.h
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_z_tied1, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z0, 1),
+               z0 = svamax_z (p0, z0, 1))
+
+/*
+** amax_1_f16_z_untied:
+**     ...
+**     movprfx z0\.h, p0/z, z0\.h
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_z_untied, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z1, 1),
+               z0 = svamax_z (p0, z1, 1))
+
+/*
+** amax_2_f16_z:
+**     ...
+**     movprfx z0\.h, p0/z, z0\.h
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_z, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z0, 2),
+               z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f16_x_tied1:
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_tied1, svfloat16_t,
+               z0 = svamax_f16_x (p0, z0, z1),
+               z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f16_x_tied2:
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_tied2, svfloat16_t,
+               z0 = svamax_f16_x (p0, z1, z0),
+               z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f16_x_untied:
+** (
+**     movprfx z0, z1
+**     famax   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     movprfx z0, z2
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_untied, svfloat16_t,
+               z0 = svamax_f16_x (p0, z1, z2),
+               z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_h4_f16_x_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famax   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_x_tied1, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_x (p0, z0, d4),
+                z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_h4_f16_x_untied:
+**     mov     z0\.h, h4
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_x_untied, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_x (p0, z1, d4),
+                z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_0_f16_x_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z0, 0),
+               z0 = svamax_x (p0, z0, 0))
+
+/*
+** amax_0_f16_x_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z1, 0),
+               z0 = svamax_x (p0, z1, 0))
+
+/*
+** amax_1_f16_x_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z0, 1),
+               z0 = svamax_x (p0, z0, 1))
+
+/*
+** amax_1_f16_x_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z1, 1),
+               z0 = svamax_x (p0, z1, 1))
+
+/*
+** amax_2_f16_x_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z0, 2),
+               z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f16_x_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z1, 2),
+               z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_tied1, svfloat16_t,
+               z0 = svamax_f16_x (svptrue_b16 (), z0, z1),
+               z0 = svamax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_amax_f16_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_tied2, svfloat16_t,
+               z0 = svamax_f16_x (svptrue_b16 (), z1, z0),
+               z0 = svamax_x (svptrue_b16 (), z1, z0))
+
+/*
+** ptrue_amax_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_untied, svfloat16_t,
+               z0 = svamax_f16_x (svptrue_b16 (), z1, z2),
+               z0 = svamax_x (svptrue_b16 (), z1, z2))
+
+/*
+** ptrue_amax_0_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z0, 0),
+               z0 = svamax_x (svptrue_b16 (), z0, 0))
+
+/*
+** ptrue_amax_0_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z1, 0),
+               z0 = svamax_x (svptrue_b16 (), z1, 0))
+
+/*
+** ptrue_amax_1_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z0, 1),
+               z0 = svamax_x (svptrue_b16 (), z0, 1))
+
+/*
+** ptrue_amax_1_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z1, 1),
+               z0 = svamax_x (svptrue_b16 (), z1, 1))
+
+/*
+** ptrue_amax_2_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z0, 2),
+               z0 = svamax_x (svptrue_b16 (), z0, 2))
+
+/*
+** ptrue_amax_2_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z1, 2),
+               z0 = svamax_x (svptrue_b16 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f32.c
new file mode 100644
index 00000000000..3a2d5116784
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f32.c
@@ -0,0 +1,437 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f32_m_tied1:
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_tied1, svfloat32_t,
+               z0 = svamax_f32_m (p0, z0, z1),
+               z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f32_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     movprfx z0, z1
+**     famax   z0\.s, p0/m, z0\.s, \1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_tied2, svfloat32_t,
+               z0 = svamax_f32_m (p0, z1, z0),
+               z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f32_m_untied:
+**     movprfx z0, z1
+**     famax   z0\.s, p0/m, z0\.s, z2\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_untied, svfloat32_t,
+               z0 = svamax_f32_m (p0, z1, z2),
+               z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_s4_f32_m_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_m_tied1, svfloat32_t, float,
+                z0 = svamax_n_f32_m (p0, z0, d4),
+                z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_s4_f32_m_untied:
+**     mov     (z[0-9]+\.s), s4
+**     movprfx z0, z1
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_m_untied, svfloat32_t, float,
+                z0 = svamax_n_f32_m (p0, z1, d4),
+                z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_0_f32_m_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_m_tied1, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z0, 0),
+               z0 = svamax_m (p0, z0, 0))
+
+/*
+** amax_0_f32_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_m_untied, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z1, 0),
+               z0 = svamax_m (p0, z1, 0))
+
+/*
+** amax_1_f32_m_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_m_tied1, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z0, 1),
+               z0 = svamax_m (p0, z0, 1))
+
+/*
+** amax_1_f32_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_m_untied, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z1, 1),
+               z0 = svamax_m (p0, z1, 1))
+
+/*
+** amax_2_f32_m:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_m, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z0, 2),
+               z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f32_z_tied1:
+**     movprfx z0\.s, p0/z, z0\.s
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_tied1, svfloat32_t,
+               z0 = svamax_f32_z (p0, z0, z1),
+               z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f32_z_tied2:
+**     movprfx z0\.s, p0/z, z0\.s
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_tied2, svfloat32_t,
+               z0 = svamax_f32_z (p0, z1, z0),
+               z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f32_z_untied:
+** (
+**     movprfx z0\.s, p0/z, z1\.s
+**     famax   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     movprfx z0\.s, p0/z, z2\.s
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_untied, svfloat32_t,
+               z0 = svamax_f32_z (p0, z1, z2),
+               z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_s4_f32_z_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     movprfx z0\.s, p0/z, z0\.s
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_z_tied1, svfloat32_t, float,
+                z0 = svamax_n_f32_z (p0, z0, d4),
+                z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_s4_f32_z_untied:
+**     mov     (z[0-9]+\.s), s4
+** (
+**     movprfx z0\.s, p0/z, z1\.s
+**     famax   z0\.s, p0/m, z0\.s, \1
+** |
+**     movprfx z0\.s, p0/z, \1
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_z_untied, svfloat32_t, float,
+                z0 = svamax_n_f32_z (p0, z1, d4),
+                z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_0_f32_z_tied1:
+**     ...
+**     movprfx z0, z31
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_z_tied1, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z0, 0),
+               z0 = svamax_z (p0, z0, 0))
+
+/*
+** amax_0_f32_z_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_z_untied, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z1, 0),
+               z0 = svamax_z (p0, z1, 0))
+
+/*
+** amax_1_f32_z_tied1:
+**     ...
+**     movprfx z0\.s, p0/z, z0\.s
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_z_tied1, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z0, 1),
+               z0 = svamax_z (p0, z0, 1))
+
+/*
+** amax_1_f32_z_untied:
+**     ...
+**     movprfx z0\.s, p0/z, z0\.s
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_z_untied, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z1, 1),
+               z0 = svamax_z (p0, z1, 1))
+
+/*
+** amax_2_f32_z:
+**     ...
+**     movprfx z0\.s, p0/z, z0\.s
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_z, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z0, 2),
+               z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f32_x_tied1:
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_tied1, svfloat32_t,
+               z0 = svamax_f32_x (p0, z0, z1),
+               z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f32_x_tied2:
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_tied2, svfloat32_t,
+               z0 = svamax_f32_x (p0, z1, z0),
+               z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f32_x_untied:
+** (
+**     movprfx z0, z1
+**     famax   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     movprfx z0, z2
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_untied, svfloat32_t,
+               z0 = svamax_f32_x (p0, z1, z2),
+               z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_s4_f32_x_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_x_tied1, svfloat32_t, float,
+                z0 = svamax_n_f32_x (p0, z0, d4),
+                z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_s4_f32_x_untied:
+**     mov     z0\.s, s4
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_x_untied, svfloat32_t, float,
+                z0 = svamax_n_f32_x (p0, z1, d4),
+                z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_0_f32_x_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z0, 0),
+               z0 = svamax_x (p0, z0, 0))
+
+/*
+** amax_0_f32_x_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z1, 0),
+               z0 = svamax_x (p0, z1, 0))
+
+/*
+** amax_1_f32_x_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z0, 1),
+               z0 = svamax_x (p0, z0, 1))
+
+/*
+** amax_1_f32_x_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z1, 1),
+               z0 = svamax_x (p0, z1, 1))
+
+/*
+** amax_2_f32_x_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z0, 2),
+               z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f32_x_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z1, 2),
+               z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_tied1, svfloat32_t,
+               z0 = svamax_f32_x (svptrue_b32 (), z0, z1),
+               z0 = svamax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_amax_f32_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_tied2, svfloat32_t,
+               z0 = svamax_f32_x (svptrue_b32 (), z1, z0),
+               z0 = svamax_x (svptrue_b32 (), z1, z0))
+
+/*
+** ptrue_amax_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_untied, svfloat32_t,
+               z0 = svamax_f32_x (svptrue_b32 (), z1, z2),
+               z0 = svamax_x (svptrue_b32 (), z1, z2))
+
+/*
+** ptrue_amax_0_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z0, 0),
+               z0 = svamax_x (svptrue_b32 (), z0, 0))
+
+/*
+** ptrue_amax_0_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z1, 0),
+               z0 = svamax_x (svptrue_b32 (), z1, 0))
+
+/*
+** ptrue_amax_1_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z0, 1),
+               z0 = svamax_x (svptrue_b32 (), z0, 1))
+
+/*
+** ptrue_amax_1_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z1, 1),
+               z0 = svamax_x (svptrue_b32 (), z1, 1))
+
+/*
+** ptrue_amax_2_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z0, 2),
+               z0 = svamax_x (svptrue_b32 (), z0, 2))
+
+/*
+** ptrue_amax_2_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z1, 2),
+               z0 = svamax_x (svptrue_b32 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f64.c
new file mode 100644
index 00000000000..042dc391e65
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f64.c
@@ -0,0 +1,437 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f64_m_tied1:
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_tied1, svfloat64_t,
+               z0 = svamax_f64_m (p0, z0, z1),
+               z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f64_m_tied2:
+**     mov     (z[0-9]+\.d), z0\.d
+**     movprfx z0, z1
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_tied2, svfloat64_t,
+               z0 = svamax_f64_m (p0, z1, z0),
+               z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f64_m_untied:
+**     movprfx z0, z1
+**     famax   z0\.d, p0/m, z0\.d, z2\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_untied, svfloat64_t,
+               z0 = svamax_f64_m (p0, z1, z2),
+               z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_d4_f64_m_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_m_tied1, svfloat64_t, double,
+                z0 = svamax_n_f64_m (p0, z0, d4),
+                z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_d4_f64_m_untied:
+**     mov     (z[0-9]+\.d), d4
+**     movprfx z0, z1
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_m_untied, svfloat64_t, double,
+                z0 = svamax_n_f64_m (p0, z1, d4),
+                z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_0_f64_m_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_m_tied1, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z0, 0),
+               z0 = svamax_m (p0, z0, 0))
+
+/*
+** amax_0_f64_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_m_untied, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z1, 0),
+               z0 = svamax_m (p0, z1, 0))
+
+/*
+** amax_1_f64_m_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_m_tied1, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z0, 1),
+               z0 = svamax_m (p0, z0, 1))
+
+/*
+** amax_1_f64_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_m_untied, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z1, 1),
+               z0 = svamax_m (p0, z1, 1))
+
+/*
+** amax_2_f64_m:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_m, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z0, 2),
+               z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f64_z_tied1:
+**     movprfx z0\.d, p0/z, z0\.d
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_tied1, svfloat64_t,
+               z0 = svamax_f64_z (p0, z0, z1),
+               z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f64_z_tied2:
+**     movprfx z0\.d, p0/z, z0\.d
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_tied2, svfloat64_t,
+               z0 = svamax_f64_z (p0, z1, z0),
+               z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f64_z_untied:
+** (
+**     movprfx z0\.d, p0/z, z1\.d
+**     famax   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     movprfx z0\.d, p0/z, z2\.d
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_untied, svfloat64_t,
+               z0 = svamax_f64_z (p0, z1, z2),
+               z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_d4_f64_z_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     movprfx z0\.d, p0/z, z0\.d
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_z_tied1, svfloat64_t, double,
+                z0 = svamax_n_f64_z (p0, z0, d4),
+                z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_d4_f64_z_untied:
+**     mov     (z[0-9]+\.d), d4
+** (
+**     movprfx z0\.d, p0/z, z1\.d
+**     famax   z0\.d, p0/m, z0\.d, \1
+** |
+**     movprfx z0\.d, p0/z, \1
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_z_untied, svfloat64_t, double,
+                z0 = svamax_n_f64_z (p0, z1, d4),
+                z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_0_f64_z_tied1:
+**     ...
+**     movprfx z0, z31
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_z_tied1, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z0, 0),
+               z0 = svamax_z (p0, z0, 0))
+
+/*
+** amax_0_f64_z_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_z_untied, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z1, 0),
+               z0 = svamax_z (p0, z1, 0))
+
+/*
+** amax_1_f64_z_tied1:
+**     ...
+**     movprfx z0\.d, p0/z, z0\.d
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_z_tied1, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z0, 1),
+               z0 = svamax_z (p0, z0, 1))
+
+/*
+** amax_1_f64_z_untied:
+**     ...
+**     movprfx z0\.d, p0/z, z0\.d
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_z_untied, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z1, 1),
+               z0 = svamax_z (p0, z1, 1))
+
+/*
+** amax_2_f64_z:
+**     ...
+**     movprfx z0\.d, p0/z, z0\.d
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_z, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z0, 2),
+               z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f64_x_tied1:
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_tied1, svfloat64_t,
+               z0 = svamax_f64_x (p0, z0, z1),
+               z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f64_x_tied2:
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_tied2, svfloat64_t,
+               z0 = svamax_f64_x (p0, z1, z0),
+               z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f64_x_untied:
+** (
+**     movprfx z0, z1
+**     famax   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     movprfx z0, z2
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_untied, svfloat64_t,
+               z0 = svamax_f64_x (p0, z1, z2),
+               z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_d4_f64_x_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_x_tied1, svfloat64_t, double,
+                z0 = svamax_n_f64_x (p0, z0, d4),
+                z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_d4_f64_x_untied:
+**     mov     z0\.d, d4
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_x_untied, svfloat64_t, double,
+                z0 = svamax_n_f64_x (p0, z1, d4),
+                z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_0_f64_x_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z0, 0),
+               z0 = svamax_x (p0, z0, 0))
+
+/*
+** amax_0_f64_x_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z1, 0),
+               z0 = svamax_x (p0, z1, 0))
+
+/*
+** amax_1_f64_x_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z0, 1),
+               z0 = svamax_x (p0, z0, 1))
+
+/*
+** amax_1_f64_x_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z1, 1),
+               z0 = svamax_x (p0, z1, 1))
+
+/*
+** amax_2_f64_x_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z0, 2),
+               z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f64_x_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, (z[0-9]+\.d)
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z1, 2),
+               z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_tied1, svfloat64_t,
+               z0 = svamax_f64_x (svptrue_b64 (), z0, z1),
+               z0 = svamax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_amax_f64_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_tied2, svfloat64_t,
+               z0 = svamax_f64_x (svptrue_b64 (), z1, z0),
+               z0 = svamax_x (svptrue_b64 (), z1, z0))
+
+/*
+** ptrue_amax_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_untied, svfloat64_t,
+               z0 = svamax_f64_x (svptrue_b64 (), z1, z2),
+               z0 = svamax_x (svptrue_b64 (), z1, z2))
+
+/*
+** ptrue_amax_0_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z0, 0),
+               z0 = svamax_x (svptrue_b64 (), z0, 0))
+
+/*
+** ptrue_amax_0_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z1, 0),
+               z0 = svamax_x (svptrue_b64 (), z1, 0))
+
+/*
+** ptrue_amax_1_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z0, 1),
+               z0 = svamax_x (svptrue_b64 (), z0, 1))
+
+/*
+** ptrue_amax_1_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z1, 1),
+               z0 = svamax_x (svptrue_b64 (), z1, 1))
+
+/*
+** ptrue_amax_2_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z0, 2),
+               z0 = svamax_x (svptrue_b64 (), z0, 2))
+
+/*
+** ptrue_amax_2_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z1, 2),
+               z0 = svamax_x (svptrue_b64 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f16.c
new file mode 100644
index 00000000000..bba27c47656
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f16.c
@@ -0,0 +1,437 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f16_m_tied1:
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_tied1, svfloat16_t,
+               z0 = svamin_f16_m (p0, z0, z1),
+               z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f16_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     movprfx z0, z1
+**     famin   z0\.h, p0/m, z0\.h, \1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_tied2, svfloat16_t,
+               z0 = svamin_f16_m (p0, z1, z0),
+               z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f16_m_untied:
+**     movprfx z0, z1
+**     famin   z0\.h, p0/m, z0\.h, z2\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_untied, svfloat16_t,
+               z0 = svamin_f16_m (p0, z1, z2),
+               z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_h4_f16_m_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_m_tied1, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_m (p0, z0, d4),
+                z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_h4_f16_m_untied:
+**     mov     (z[0-9]+\.h), h4
+**     movprfx z0, z1
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_m_untied, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_m (p0, z1, d4),
+                z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_0_f16_m_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_m_tied1, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z0, 0),
+               z0 = svamin_m (p0, z0, 0))
+
+/*
+** amin_0_f16_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_m_untied, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z1, 0),
+               z0 = svamin_m (p0, z1, 0))
+
+/*
+** amin_1_f16_m_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_m_tied1, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z0, 1),
+               z0 = svamin_m (p0, z0, 1))
+
+/*
+** amin_1_f16_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_m_untied, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z1, 1),
+               z0 = svamin_m (p0, z1, 1))
+
+/*
+** amin_2_f16_m:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_m, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z0, 2),
+               z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f16_z_tied1:
+**     movprfx z0\.h, p0/z, z0\.h
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_tied1, svfloat16_t,
+               z0 = svamin_f16_z (p0, z0, z1),
+               z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f16_z_tied2:
+**     movprfx z0\.h, p0/z, z0\.h
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_tied2, svfloat16_t,
+               z0 = svamin_f16_z (p0, z1, z0),
+               z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f16_z_untied:
+** (
+**     movprfx z0\.h, p0/z, z1\.h
+**     famin   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     movprfx z0\.h, p0/z, z2\.h
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_untied, svfloat16_t,
+               z0 = svamin_f16_z (p0, z1, z2),
+               z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_h4_f16_z_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     movprfx z0\.h, p0/z, z0\.h
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_z_tied1, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_z (p0, z0, d4),
+                z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_h4_f16_z_untied:
+**     mov     (z[0-9]+\.h), h4
+** (
+**     movprfx z0\.h, p0/z, z1\.h
+**     famin   z0\.h, p0/m, z0\.h, \1
+** |
+**     movprfx z0\.h, p0/z, \1
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_z_untied, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_z (p0, z1, d4),
+                z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_0_f16_z_tied1:
+**     ...
+**     movprfx z0, z31
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_z_tied1, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z0, 0),
+               z0 = svamin_z (p0, z0, 0))
+
+/*
+** amin_0_f16_z_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_z_untied, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z1, 0),
+               z0 = svamin_z (p0, z1, 0))
+
+/*
+** amin_1_f16_z_tied1:
+**     ...
+**     movprfx z0\.h, p0/z, z0\.h
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_z_tied1, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z0, 1),
+               z0 = svamin_z (p0, z0, 1))
+
+/*
+** amin_1_f16_z_untied:
+**     ...
+**     movprfx z0\.h, p0/z, z0\.h
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_z_untied, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z1, 1),
+               z0 = svamin_z (p0, z1, 1))
+
+/*
+** amin_2_f16_z:
+**     ...
+**     movprfx z0\.h, p0/z, z0\.h
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_z, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z0, 2),
+               z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f16_x_tied1:
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_tied1, svfloat16_t,
+               z0 = svamin_f16_x (p0, z0, z1),
+               z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f16_x_tied2:
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_tied2, svfloat16_t,
+               z0 = svamin_f16_x (p0, z1, z0),
+               z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f16_x_untied:
+** (
+**     movprfx z0, z1
+**     famin   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     movprfx z0, z2
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_untied, svfloat16_t,
+               z0 = svamin_f16_x (p0, z1, z2),
+               z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_h4_f16_x_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_x_tied1, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_x (p0, z0, d4),
+                z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_h4_f16_x_untied:
+**     mov     z0\.h, h4
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_x_untied, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_x (p0, z1, d4),
+                z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_0_f16_x_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z0, 0),
+               z0 = svamin_x (p0, z0, 0))
+
+/*
+** amin_0_f16_x_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z1, 0),
+               z0 = svamin_x (p0, z1, 0))
+
+/*
+** amin_1_f16_x_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z0, 1),
+               z0 = svamin_x (p0, z0, 1))
+
+/*
+** amin_1_f16_x_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z1, 1),
+               z0 = svamin_x (p0, z1, 1))
+
+/*
+** amin_2_f16_x_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z0, 2),
+               z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f16_x_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, (z[0-9]+\.h)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z1, 2),
+               z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_tied1, svfloat16_t,
+               z0 = svamin_f16_x (svptrue_b16 (), z0, z1),
+               z0 = svamin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_amin_f16_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_tied2, svfloat16_t,
+               z0 = svamin_f16_x (svptrue_b16 (), z1, z0),
+               z0 = svamin_x (svptrue_b16 (), z1, z0))
+
+/*
+** ptrue_amin_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_untied, svfloat16_t,
+               z0 = svamin_f16_x (svptrue_b16 (), z1, z2),
+               z0 = svamin_x (svptrue_b16 (), z1, z2))
+
+/*
+** ptrue_amin_0_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z0, 0),
+               z0 = svamin_x (svptrue_b16 (), z0, 0))
+
+/*
+** ptrue_amin_0_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z1, 0),
+               z0 = svamin_x (svptrue_b16 (), z1, 0))
+
+/*
+** ptrue_amin_1_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z0, 1),
+               z0 = svamin_x (svptrue_b16 (), z0, 1))
+
+/*
+** ptrue_amin_1_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z1, 1),
+               z0 = svamin_x (svptrue_b16 (), z1, 1))
+
+/*
+** ptrue_amin_2_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z0, 2),
+               z0 = svamin_x (svptrue_b16 (), z0, 2))
+
+/*
+** ptrue_amin_2_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z1, 2),
+               z0 = svamin_x (svptrue_b16 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f32.c
new file mode 100644
index 00000000000..5cb50230ce4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f32.c
@@ -0,0 +1,437 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f32_m_tied1:
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_tied1, svfloat32_t,
+               z0 = svamin_f32_m (p0, z0, z1),
+               z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f32_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     movprfx z0, z1
+**     famin   z0\.s, p0/m, z0\.s, \1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_tied2, svfloat32_t,
+               z0 = svamin_f32_m (p0, z1, z0),
+               z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f32_m_untied:
+**     movprfx z0, z1
+**     famin   z0\.s, p0/m, z0\.s, z2\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_untied, svfloat32_t,
+               z0 = svamin_f32_m (p0, z1, z2),
+               z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_s4_f32_m_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_m_tied1, svfloat32_t, float,
+                z0 = svamin_n_f32_m (p0, z0, d4),
+                z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_s4_f32_m_untied:
+**     mov     (z[0-9]+\.s), s4
+**     movprfx z0, z1
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_m_untied, svfloat32_t, float,
+                z0 = svamin_n_f32_m (p0, z1, d4),
+                z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_0_f32_m_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_m_tied1, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z0, 0),
+               z0 = svamin_m (p0, z0, 0))
+
+/*
+** amin_0_f32_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_m_untied, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z1, 0),
+               z0 = svamin_m (p0, z1, 0))
+
+/*
+** amin_1_f32_m_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_m_tied1, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z0, 1),
+               z0 = svamin_m (p0, z0, 1))
+
+/*
+** amin_1_f32_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_m_untied, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z1, 1),
+               z0 = svamin_m (p0, z1, 1))
+
+/*
+** amin_2_f32_m:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_m, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z0, 2),
+               z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f32_z_tied1:
+**     movprfx z0\.s, p0/z, z0\.s
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_tied1, svfloat32_t,
+               z0 = svamin_f32_z (p0, z0, z1),
+               z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f32_z_tied2:
+**     movprfx z0\.s, p0/z, z0\.s
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_tied2, svfloat32_t,
+               z0 = svamin_f32_z (p0, z1, z0),
+               z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f32_z_untied:
+** (
+**     movprfx z0\.s, p0/z, z1\.s
+**     famin   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     movprfx z0\.s, p0/z, z2\.s
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_untied, svfloat32_t,
+               z0 = svamin_f32_z (p0, z1, z2),
+               z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_s4_f32_z_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     movprfx z0\.s, p0/z, z0\.s
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_z_tied1, svfloat32_t, float,
+                z0 = svamin_n_f32_z (p0, z0, d4),
+                z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_s4_f32_z_untied:
+**     mov     (z[0-9]+\.s), s4
+** (
+**     movprfx z0\.s, p0/z, z1\.s
+**     famin   z0\.s, p0/m, z0\.s, \1
+** |
+**     movprfx z0\.s, p0/z, \1
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_z_untied, svfloat32_t, float,
+                z0 = svamin_n_f32_z (p0, z1, d4),
+                z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_0_f32_z_tied1:
+**     ...
+**     movprfx z0, z31
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_z_tied1, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z0, 0),
+               z0 = svamin_z (p0, z0, 0))
+
+/*
+** amin_0_f32_z_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_z_untied, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z1, 0),
+               z0 = svamin_z (p0, z1, 0))
+
+/*
+** amin_1_f32_z_tied1:
+**     ...
+**     movprfx z0\.s, p0/z, z0\.s
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_z_tied1, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z0, 1),
+               z0 = svamin_z (p0, z0, 1))
+
+/*
+** amin_1_f32_z_untied:
+**     ...
+**     movprfx z0\.s, p0/z, z0\.s
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_z_untied, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z1, 1),
+               z0 = svamin_z (p0, z1, 1))
+
+/*
+** amin_2_f32_z:
+**     ...
+**     movprfx z0\.s, p0/z, z0\.s
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_z, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z0, 2),
+               z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f32_x_tied1:
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_tied1, svfloat32_t,
+               z0 = svamin_f32_x (p0, z0, z1),
+               z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f32_x_tied2:
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_tied2, svfloat32_t,
+               z0 = svamin_f32_x (p0, z1, z0),
+               z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f32_x_untied:
+** (
+**     movprfx z0, z1
+**     famin   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     movprfx z0, z2
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_untied, svfloat32_t,
+               z0 = svamin_f32_x (p0, z1, z2),
+               z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_s4_f32_x_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_x_tied1, svfloat32_t, float,
+                z0 = svamin_n_f32_x (p0, z0, d4),
+                z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_s4_f32_x_untied:
+**     mov     z0\.s, s4
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_x_untied, svfloat32_t, float,
+                z0 = svamin_n_f32_x (p0, z1, d4),
+                z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_0_f32_x_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z0, 0),
+               z0 = svamin_x (p0, z0, 0))
+
+/*
+** amin_0_f32_x_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z1, 0),
+               z0 = svamin_x (p0, z1, 0))
+
+/*
+** amin_1_f32_x_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z0, 1),
+               z0 = svamin_x (p0, z0, 1))
+
+/*
+** amin_1_f32_x_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z1, 1),
+               z0 = svamin_x (p0, z1, 1))
+
+/*
+** amin_2_f32_x_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z0, 2),
+               z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f32_x_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, (z[0-9]+\.s)
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z1, 2),
+               z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_tied1, svfloat32_t,
+               z0 = svamin_f32_x (svptrue_b32 (), z0, z1),
+               z0 = svamin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_amin_f32_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_tied2, svfloat32_t,
+               z0 = svamin_f32_x (svptrue_b32 (), z1, z0),
+               z0 = svamin_x (svptrue_b32 (), z1, z0))
+
+/*
+** ptrue_amin_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_untied, svfloat32_t,
+               z0 = svamin_f32_x (svptrue_b32 (), z1, z2),
+               z0 = svamin_x (svptrue_b32 (), z1, z2))
+
+/*
+** ptrue_amin_0_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z0, 0),
+               z0 = svamin_x (svptrue_b32 (), z0, 0))
+
+/*
+** ptrue_amin_0_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z1, 0),
+               z0 = svamin_x (svptrue_b32 (), z1, 0))
+
+/*
+** ptrue_amin_1_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z0, 1),
+               z0 = svamin_x (svptrue_b32 (), z0, 1))
+
+/*
+** ptrue_amin_1_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z1, 1),
+               z0 = svamin_x (svptrue_b32 (), z1, 1))
+
+/*
+** ptrue_amin_2_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z0, 2),
+               z0 = svamin_x (svptrue_b32 (), z0, 2))
+
+/*
+** ptrue_amin_2_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z1, 2),
+               z0 = svamin_x (svptrue_b32 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f64.c
new file mode 100644
index 00000000000..8c87ec9c721
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f64.c
@@ -0,0 +1,437 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f64_m_tied1:
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_tied1, svfloat64_t,
+               z0 = svamin_f64_m (p0, z0, z1),
+               z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f64_m_tied2:
+**     mov     (z[0-9]+\.d), z0\.d
+**     movprfx z0, z1
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_tied2, svfloat64_t,
+               z0 = svamin_f64_m (p0, z1, z0),
+               z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f64_m_untied:
+**     movprfx z0, z1
+**     famin   z0\.d, p0/m, z0\.d, z2\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_untied, svfloat64_t,
+               z0 = svamin_f64_m (p0, z1, z2),
+               z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_d4_f64_m_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_m_tied1, svfloat64_t, double,
+                z0 = svamin_n_f64_m (p0, z0, d4),
+                z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_d4_f64_m_untied:
+**     mov     (z[0-9]+\.d), d4
+**     movprfx z0, z1
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_m_untied, svfloat64_t, double,
+                z0 = svamin_n_f64_m (p0, z1, d4),
+                z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_0_f64_m_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_m_tied1, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z0, 0),
+               z0 = svamin_m (p0, z0, 0))
+
+/*
+** amin_0_f64_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_m_untied, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z1, 0),
+               z0 = svamin_m (p0, z1, 0))
+
+/*
+** amin_1_f64_m_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_m_tied1, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z0, 1),
+               z0 = svamin_m (p0, z0, 1))
+
+/*
+** amin_1_f64_m_untied:
+**     ...
+**     movprfx z0, z1
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_m_untied, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z1, 1),
+               z0 = svamin_m (p0, z1, 1))
+
+/*
+** amin_2_f64_m:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_m, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z0, 2),
+               z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f64_z_tied1:
+**     movprfx z0\.d, p0/z, z0\.d
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_tied1, svfloat64_t,
+               z0 = svamin_f64_z (p0, z0, z1),
+               z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f64_z_tied2:
+**     movprfx z0\.d, p0/z, z0\.d
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_tied2, svfloat64_t,
+               z0 = svamin_f64_z (p0, z1, z0),
+               z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f64_z_untied:
+** (
+**     movprfx z0\.d, p0/z, z1\.d
+**     famin   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     movprfx z0\.d, p0/z, z2\.d
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_untied, svfloat64_t,
+               z0 = svamin_f64_z (p0, z1, z2),
+               z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_d4_f64_z_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     movprfx z0\.d, p0/z, z0\.d
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_z_tied1, svfloat64_t, double,
+                z0 = svamin_n_f64_z (p0, z0, d4),
+                z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_d4_f64_z_untied:
+**     mov     (z[0-9]+\.d), d4
+** (
+**     movprfx z0\.d, p0/z, z1\.d
+**     famin   z0\.d, p0/m, z0\.d, \1
+** |
+**     movprfx z0\.d, p0/z, \1
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_z_untied, svfloat64_t, double,
+                z0 = svamin_n_f64_z (p0, z1, d4),
+                z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_0_f64_z_tied1:
+**     ...
+**     movprfx z0, z31
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_z_tied1, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z0, 0),
+               z0 = svamin_z (p0, z0, 0))
+
+/*
+** amin_0_f64_z_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_z_untied, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z1, 0),
+               z0 = svamin_z (p0, z1, 0))
+
+/*
+** amin_1_f64_z_tied1:
+**     ...
+**     movprfx z0\.d, p0/z, z0\.d
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_z_tied1, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z0, 1),
+               z0 = svamin_z (p0, z0, 1))
+
+/*
+** amin_1_f64_z_untied:
+**     ...
+**     movprfx z0\.d, p0/z, z0\.d
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_z_untied, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z1, 1),
+               z0 = svamin_z (p0, z1, 1))
+
+/*
+** amin_2_f64_z:
+**     ...
+**     movprfx z0\.d, p0/z, z0\.d
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_z, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z0, 2),
+               z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f64_x_tied1:
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_tied1, svfloat64_t,
+               z0 = svamin_f64_x (p0, z0, z1),
+               z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f64_x_tied2:
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_tied2, svfloat64_t,
+               z0 = svamin_f64_x (p0, z1, z0),
+               z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f64_x_untied:
+** (
+**     movprfx z0, z1
+**     famin   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     movprfx z0, z2
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_untied, svfloat64_t,
+               z0 = svamin_f64_x (p0, z1, z2),
+               z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_d4_f64_x_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_x_tied1, svfloat64_t, double,
+                z0 = svamin_n_f64_x (p0, z0, d4),
+                z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_d4_f64_x_untied:
+**     mov     z0\.d, d4
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_x_untied, svfloat64_t, double,
+                z0 = svamin_n_f64_x (p0, z1, d4),
+                z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_0_f64_x_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z0, 0),
+               z0 = svamin_x (p0, z0, 0))
+
+/*
+** amin_0_f64_x_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z1, 0),
+               z0 = svamin_x (p0, z1, 0))
+
+/*
+** amin_1_f64_x_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z0, 1),
+               z0 = svamin_x (p0, z0, 1))
+
+/*
+** amin_1_f64_x_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z1, 1),
+               z0 = svamin_x (p0, z1, 1))
+
+/*
+** amin_2_f64_x_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z0, 2),
+               z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f64_x_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z1, 2),
+               z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_tied1, svfloat64_t,
+               z0 = svamin_f64_x (svptrue_b64 (), z0, z1),
+               z0 = svamin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_amin_f64_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_tied2, svfloat64_t,
+               z0 = svamin_f64_x (svptrue_b64 (), z1, z0),
+               z0 = svamin_x (svptrue_b64 (), z1, z0))
+
+/*
+** ptrue_amin_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_untied, svfloat64_t,
+               z0 = svamin_f64_x (svptrue_b64 (), z1, z2),
+               z0 = svamin_x (svptrue_b64 (), z1, z2))
+
+/*
+** ptrue_amin_0_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z0, 0),
+               z0 = svamin_x (svptrue_b64 (), z0, 0))
+
+/*
+** ptrue_amin_0_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z1, 0),
+               z0 = svamin_x (svptrue_b64 (), z1, 0))
+
+/*
+** ptrue_amin_1_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z0, 1),
+               z0 = svamin_x (svptrue_b64 (), z0, 1))
+
+/*
+** ptrue_amin_1_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z1, 1),
+               z0 = svamin_x (svptrue_b64 (), z1, 1))
+
+/*
+** ptrue_amin_2_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z0, 2),
+               z0 = svamin_x (svptrue_b64 (), z0, 2))
+
+/*
+** ptrue_amin_2_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z1, 2),
+               z0 = svamin_x (svptrue_b64 (), z1, 2))
