sdesmalen updated this revision to Diff 543841.
sdesmalen added a comment.

Replace ImmCheck0_3 with ImmCheck0_7.
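
For context, a minimal standalone sketch of what the widened immediate check permits (the function name, header, and build setup here are illustrative assumptions, not part of this patch): the ZA tile index of the za64 double-precision mopa/mops intrinsics now accepts 0 through 7 rather than 0 through 3, matching the eight 64-bit ZA tiles (ZA0.D to ZA7.D).

  // Sketch only; assumes an SME-enabled Clang with the double-precision SME
  // feature, compiled in a streaming context with ZA available (the attribute
  // spelling varies between ACLE/Clang versions, so it is omitted here,
  // mirroring the tests in this patch).
  #include <arm_sme.h>  // older Clang may spell this as the draft-spec header

  void za64_f64_tile_range(svbool_t pg, svfloat64_t z) {
    svmopa_za64_f64_m(0, pg, pg, z, z); // accepted: lowest ZA.D tile index
    svmopa_za64_f64_m(7, pg, pg, z, z); // accepted with this patch: range is now [0, 7]
    svmopa_za64_f64_m(8, pg, pg, z, z); // rejected: 8 is outside the valid range [0, 7]
  }

The updated Sema test below exercises the same boundary values for the f64 variants (8 and -1 rejected), and the CodeGen tests now use tile index 7.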


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D156128/new/

https://reviews.llvm.org/D156128

Files:
  clang/include/clang/Basic/arm_sme.td
  clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mopa-za64.c
  clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mops-za64.c
  clang/test/Sema/aarch64-sme-intrinsics/acle_sme_imm.cpp

Index: clang/test/Sema/aarch64-sme-intrinsics/acle_sme_imm.cpp
===================================================================
--- clang/test/Sema/aarch64-sme-intrinsics/acle_sme_imm.cpp
+++ clang/test/Sema/aarch64-sme-intrinsics/acle_sme_imm.cpp
@@ -190,6 +190,11 @@
   SVE_ACLE_FUNC(svusmopa_za64, _u16, _m,)(8, pg, pg, svundef_u16(), svundef_s16());
   // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 7]}}
   SVE_ACLE_FUNC(svusmops_za64, _u16, _m,)(-1, pg, pg, svundef_u16(), svundef_s16());
+
+  // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}}
+  SVE_ACLE_FUNC(svmopa_za64, _f64, _m,)(8, pg, pg, svundef_f64(), svundef_f64());
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 7]}}
+  SVE_ACLE_FUNC(svmops_za64, _f64, _m,)(-1, pg, pg, svundef_f64(), svundef_f64());
 }
 
 void test_range_0_15(svbool_t pg, void *ptr) {
Index: clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mops-za64.c
===================================================================
--- clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mops-za64.c
+++ clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mops-za64.c
@@ -18,11 +18,11 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PN:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PM:%.*]])
-// CHECK-NEXT:    tail call void @llvm.aarch64.sme.smops.wide.nxv8i16(i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.smops.wide.nxv8i16(i32 7, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
 // CHECK-NEXT:    ret void
 //
 void test_svmops_za64_s16(svbool_t pn, svbool_t pm, svint16_t zn, svint16_t zm) {
-  SME_ACLE_FUNC(svmops_za64, _s16, _m)(1, pn, pm, zn, zm);
+  SME_ACLE_FUNC(svmops_za64, _s16, _m)(7, pn, pm, zn, zm);
 }
 
 // CHECK-C-LABEL: @test_svmops_za64_u16(
@@ -42,11 +42,11 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PN:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PM:%.*]])
-// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mops.nxv2f64(i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mops.nxv2f64(i32 7, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
 // CHECK-NEXT:    ret void
 //
 void test_svmops_za64_f64(svbool_t pn, svbool_t pm, svfloat64_t zn, svfloat64_t zm) {
-  SME_ACLE_FUNC(svmops_za64, _f64, _m)(1, pn, pm, zn, zm);
+  SME_ACLE_FUNC(svmops_za64, _f64, _m)(7, pn, pm, zn, zm);
 }
 
 // CHECK-C-LABEL: @test_svsumops_za64_s16(
@@ -66,9 +66,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PN:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PM:%.*]])
-// CHECK-NEXT:    tail call void @llvm.aarch64.sme.usmops.wide.nxv8i16(i32 2, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.usmops.wide.nxv8i16(i32 7, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
 // CHECK-NEXT:    ret void
 //
 void test_svusmops_za64_u16(svbool_t pn, svbool_t pm, svuint16_t zn, svint16_t zm) {
-  SME_ACLE_FUNC(svusmops_za64, _u16, _m)(2, pn, pm, zn, zm);
+  SME_ACLE_FUNC(svusmops_za64, _u16, _m)(7, pn, pm, zn, zm);
 }
Index: clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mopa-za64.c
===================================================================
--- clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mopa-za64.c
+++ clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_mopa-za64.c
@@ -18,11 +18,11 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PN:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PM:%.*]])
-// CHECK-NEXT:    tail call void @llvm.aarch64.sme.smopa.wide.nxv8i16(i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.smopa.wide.nxv8i16(i32 7, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
 // CHECK-NEXT:    ret void
 //
 void test_svmopa_za64_s16(svbool_t pn, svbool_t pm, svint16_t zn, svint16_t zm) {
-  SME_ACLE_FUNC(svmopa_za64, _s16, _m)(1, pn, pm, zn, zm);
+  SME_ACLE_FUNC(svmopa_za64, _s16, _m)(7, pn, pm, zn, zm);
 }
 
 // CHECK-C-LABEL: @test_svmopa_za64_u16(
@@ -42,11 +42,11 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PN:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PM:%.*]])
-// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mopa.nxv2f64(i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mopa.nxv2f64(i32 7, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
 // CHECK-NEXT:    ret void
 //
 void test_svmopa_za64_f64(svbool_t pn, svbool_t pm, svfloat64_t zn, svfloat64_t zm) {
-  SME_ACLE_FUNC(svmopa_za64, _f64, _m)(1, pn, pm, zn, zm);
+  SME_ACLE_FUNC(svmopa_za64, _f64, _m)(7, pn, pm, zn, zm);
 }
 
 // CHECK-C-LABEL: @test_svsumopa_za64_s16(
@@ -66,9 +66,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PN:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PM:%.*]])
-// CHECK-NEXT:    tail call void @llvm.aarch64.sme.usmopa.wide.nxv8i16(i32 2, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.usmopa.wide.nxv8i16(i32 7, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i1> [[TMP1]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
 // CHECK-NEXT:    ret void
 //
 void test_svusmopa_za64_u16(svbool_t pn, svbool_t pm, svuint16_t zn, svint16_t zm) {
-  SME_ACLE_FUNC(svusmopa_za64, _u16, _m)(2, pn, pm, zn, zm);
+  SME_ACLE_FUNC(svusmopa_za64, _u16, _m)(7, pn, pm, zn, zm);
 }
Index: clang/include/clang/Basic/arm_sme.td
===================================================================
--- clang/include/clang/Basic/arm_sme.td
+++ clang/include/clang/Basic/arm_sme.td
@@ -251,7 +251,7 @@
     def NAME # _ZA64_D: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPdd", "d",
                               MergeOp1, "aarch64_sme_" # n_suffix,
                               [IsStreaming, IsSharedZA],
-                              [ImmCheck<0, ImmCheck0_3>]>;
+                              [ImmCheck<0, ImmCheck0_7>]>;
   }
 }
 