sdesmalen created this revision.
sdesmalen added reviewers: efriedma, SjoerdMeijer, rovka.
Herald added a subscriber: tschuett.
Herald added a reviewer: rengolin.
Herald added a project: clang.
sdesmalen added a parent revision: D76678: [SveEmitter] Add range checks for 
immediates and predicate patterns..
sdesmalen added a child revision: D76680: [SveEmitter] Add immediate checks for 
lanes and complex imms.

This patch adds a number of intrinsics that take immediates with
varying ranges based on the element size of one of the operands.

  svext:   immediate ranging 0..(2048/sizeinbits(elt) - 1)
  svasrd:  immediate ranging 1..sizeinbits(elt)
  svqshlu: immediate ranging 1..sizeinbits(elt)/2
  ftmad:   immediate ranging 0..(sizeinbits(elt) - 1)


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D76679

Files:
  clang/include/clang/Basic/arm_sve.td
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/lib/Sema/SemaChecking.cpp
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_asrd.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_asrd_shortform.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext_shortform.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tmad.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tmad_shortform.c
  clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_asrd.c
  clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_ext.c
  clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_tmad.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu_shortform.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shrnb.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shrnb_shortform.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shrnb.c
  clang/utils/TableGen/SveEmitter.cpp

Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -471,6 +471,9 @@
     Bitwidth = ElementBitwidth;
     NumVectors = 0;
     break;
+  case 'h':
+    ElementBitwidth /= 2;
+    break;
   case 'P':
     Signed = true;
     Float = false;
@@ -478,6 +481,11 @@
     Bitwidth = 16;
     ElementBitwidth = 1;
     break;
+  case 'u':
+    Predicate = false;
+    Signed = false;
+    Float = false;
+    break;
   case 'i':
     Predicate = false;
     Float = false;
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shrnb.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shrnb.c
@@ -0,0 +1,65 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 %s
+
+
+#include <arm_sve.h>
+
+svint8_t test_svshrnb_n_s16(svint16_t op1)
+{
+  return svshrnb_n_s16(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+}
+
+svint8_t test_svshrnb(svint16_t op1)
+{
+  return svshrnb(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+}
+
+svint16_t test_svshrnb_n_s32(svint32_t op1)
+{
+  return svshrnb_n_s32(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+}
+
+svint16_t test_svshrnb_1(svint32_t op1)
+{
+  return svshrnb(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+}
+
+svint32_t test_svshrnb_n_s64(svint64_t op1)
+{
+  return svshrnb_n_s64(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+}
+
+svint32_t test_svshrnb_2(svint64_t op1)
+{
+  return svshrnb(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+}
+
+svuint8_t test_svshrnb_n_u16(svuint16_t op1)
+{
+  return svshrnb_n_u16(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+}
+
+svuint8_t test_svshrnb_3(svuint16_t op1)
+{
+  return svshrnb(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+}
+
+svuint16_t test_svshrnb_n_u32(svuint32_t op1)
+{
+  return svshrnb_n_u32(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+}
+
+svuint16_t test_svshrnb_4(svuint32_t op1)
+{
+  return svshrnb(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+}
+
+svuint32_t test_svshrnb_n_u64(svuint64_t op1)
+{
+  return svshrnb_n_u64(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+}
+
+svuint32_t test_svshrnb_5(svuint64_t op1)
+{
+  return svshrnb(op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+}
+
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c
@@ -0,0 +1,44 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 %s
+
+
+#include <arm_sve.h>
+
+svuint8_t test_svqshlu_n_s8_m(svbool_t pg, svint8_t op1)
+{
+  return svqshlu_n_s8_m(pg, op1, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svuint8_t test_svqshlu_m(svbool_t pg, svint8_t op1)
+{
+  return svqshlu_m(pg, op1, 8); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svuint16_t test_svqshlu_n_s16_m(svbool_t pg, svint16_t op1)
+{
+  return svqshlu_n_s16_m(pg, op1, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 15]}}
+}
+
+svuint16_t test_svqshlu_m_1(svbool_t pg, svint16_t op1)
+{
+  return svqshlu_m(pg, op1, 16); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 15]}}
+}
+
+svuint32_t test_svqshlu_n_s32_m(svbool_t pg, svint32_t op1)
+{
+  return svqshlu_n_s32_m(pg, op1, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+}
+
+svuint32_t test_svqshlu_m_2(svbool_t pg, svint32_t op1)
+{
+  return svqshlu_m(pg, op1, 32); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+}
+
+svuint64_t test_svqshlu_n_s64_m(svbool_t pg, svint64_t op1)
+{
+  return svqshlu_n_s64_m(pg, op1, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 63]}}
+}
+
+svuint64_t test_svqshlu_m_3(svbool_t pg, svint64_t op1)
+{
+  return svqshlu_m(pg, op1, 64); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 63]}}
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shrnb_shortform.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shrnb_shortform.c
@@ -0,0 +1,115 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 %s | FileCheck %s
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify  -verify-ignore-unexpected=error -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+//
+// shrnb
+//
+
+svint8_t test_svshrnb_n_s16(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 1)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svint8_t test_svshrnb_n_s16_8(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s16_8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 8)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 8); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svint16_t test_svshrnb_n_s32(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svint16_t test_svshrnb_n_s32_16(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s32_16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 16); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svint32_t test_svshrnb_n_s64(svint64_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svint32_t test_svshrnb_n_s64_32(svint64_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s64_32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 32)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 32); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svuint8_t test_uvshrnb_n_s16(svuint16_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 1)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svuint8_t test_uvshrnb_n_s16_8(svuint16_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s16_8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 8)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 8); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svuint16_t test_uvshrnb_n_s32(svuint32_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svuint16_t test_uvshrnb_n_s32_16(svuint32_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s32_16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 16); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svuint32_t test_uvshrnb_n_s64(svuint64_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
+
+svuint32_t test_uvshrnb_n_s64_32(svuint64_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s64_32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 32)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb(op1, 32); // expected-warning {{implicit declaration of function 'svshrnb'}}
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shrnb.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shrnb.c
@@ -0,0 +1,115 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 %s | FileCheck %s
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+//
+// shrnb
+//
+
+svint8_t test_svshrnb_n_s16(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 1)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_s16(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb_n_s16'}}
+}
+
+svint8_t test_svshrnb_n_s16_8(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s16_8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 8)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_s16(op1, 8); // expected-warning {{implicit declaration of function 'svshrnb_n_s16'}}
+}
+
+svint16_t test_svshrnb_n_s32(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_s32(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb_n_s32'}}
+}
+
+svint16_t test_svshrnb_n_s32_16(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s32_16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_s32(op1, 16); // expected-warning {{implicit declaration of function 'svshrnb_n_s32'}}
+}
+
+svint32_t test_svshrnb_n_s64(svint64_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_s64(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb_n_s64'}}
+}
+
+svint32_t test_svshrnb_n_s64_32(svint64_t op1)
+{
+  // CHECK-LABEL: test_svshrnb_n_s64_32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 32)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_s64(op1, 32); // expected-warning {{implicit declaration of function 'svshrnb_n_s64'}}
+}
+
+svuint8_t test_uvshrnb_n_s16(svuint16_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 1)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_u16(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb_n_u16'}}
+}
+
+svuint8_t test_uvshrnb_n_s16_8(svuint16_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s16_8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %op1, i32 8)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_u16(op1, 8); // expected-warning {{implicit declaration of function 'svshrnb_n_u16'}}
+}
+
+svuint16_t test_uvshrnb_n_s32(svuint32_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_u32(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb_n_u32'}}
+}
+
+svuint16_t test_uvshrnb_n_s32_16(svuint32_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s32_16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %op1, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_u32(op1, 16); // expected-warning {{implicit declaration of function 'svshrnb_n_u32'}}
+}
+
+svuint32_t test_uvshrnb_n_s64(svuint64_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_u64(op1, 1); // expected-warning {{implicit declaration of function 'svshrnb_n_u64'}}
+}
+
+svuint32_t test_uvshrnb_n_s64_32(svuint64_t op1)
+{
+  // CHECK-LABEL: test_uvshrnb_n_s64_32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %op1, i32 32)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // CHECK-ERROR: {{.*}}:[[@LINE+1]]:10: error:
+  return svshrnb_n_u64(op1, 32); // expected-warning {{implicit declaration of function 'svshrnb_n_u64'}}
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu_shortform.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu_shortform.c
@@ -0,0 +1,65 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 %s | FileCheck %s
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify  -verify-ignore-unexpected=error -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+
+svuint8_t test_svqshlu_n_s8_m(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s8_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 0)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 0); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
+
+svuint8_t test_svqshlu_n_s8_m_7(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s8_m_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 7)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 7); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
+
+svuint16_t test_svqshlu_n_s16_m(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s16_m
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 0); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
+
+svuint16_t test_svqshlu_n_s16_m_15(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s16_m_15
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 15)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 15); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
+
+svuint32_t test_svqshlu_n_s32_m_31(svbool_t pg, svint32_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s32_m_31
+  // CHECK: %[[P0:.*]] = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshlu.nxv4i32(<vscale x 4 x i1> %[[P0]], <vscale x 4 x i32> %op1, i32 31)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 31); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
+
+svuint64_t test_svqshlu_n_s64_m(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s64_m
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 0); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
+
+svuint64_t test_svqshlu_n_s64_m_63(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s64_m_63
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 63)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return svqshlu_m(pg, op1, 63); // expected-warning {{implicit declaration of function 'svqshlu_m'}}
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
@@ -0,0 +1,65 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 %s | FileCheck %s
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify  -verify-ignore-unexpected=error -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+
+svuint8_t test_svqshlu_n_s8_m(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s8_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 0)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return svqshlu_n_s8_m(pg, op1, 0); // expected-warning {{implicit declaration of function 'svqshlu_n_s8_m'}}
+}
+
+svuint8_t test_svqshlu_n_s8_m_7(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s8_m_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 7)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return svqshlu_n_s8_m(pg, op1, 7); // expected-warning {{implicit declaration of function 'svqshlu_n_s8_m'}}
+}
+
+svuint16_t test_svqshlu_n_s16_m(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s16_m
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return svqshlu_n_s16_m(pg, op1, 0); // expected-warning {{implicit declaration of function 'svqshlu_n_s16_m'}}
+}
+
+svuint16_t test_svqshlu_n_s16_m_15(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s16_m_15
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 15)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return svqshlu_n_s16_m(pg, op1, 15); // expected-warning {{implicit declaration of function 'svqshlu_n_s16_m'}}
+}
+
+svuint32_t test_svqshlu_n_s32_m_31(svbool_t pg, svint32_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s32_m_31
+  // CHECK: %[[P0:.*]] = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshlu.nxv4i32(<vscale x 4 x i1> %[[P0]], <vscale x 4 x i32> %op1, i32 31)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return svqshlu_n_s32_m(pg, op1, 31); // expected-warning {{implicit declaration of function 'svqshlu_n_s32_m'}}
+}
+
+svuint64_t test_svqshlu_n_s64_m(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s64_m
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return svqshlu_n_s64_m(pg, op1, 0); // expected-warning {{implicit declaration of function 'svqshlu_n_s64_m'}}
+}
+
+svuint64_t test_svqshlu_n_s64_m_63(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s64_m_63
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 63)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return svqshlu_n_s64_m(pg, op1, 63); // expected-warning {{implicit declaration of function 'svqshlu_n_s64_m'}}
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_tmad.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_tmad.c
@@ -0,0 +1,36 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+//
+// tmad
+//
+
+svfloat16_t test_svtmad_f16(svfloat16_t op1, svfloat16_t op2)
+{
+  return svtmad_f16(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svfloat16_t test_svtmad_f16_2(svfloat16_t op1, svfloat16_t op2)
+{
+  return svtmad_f16(op1, op2, 8); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svfloat32_t test_svtmad_f32(svfloat32_t op1, svfloat32_t op2)
+{
+  return svtmad_f32(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svfloat32_t test_svtmad_f32_2(svfloat32_t op1, svfloat32_t op2)
+{
+  return svtmad_f32(op1, op2, 8); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svfloat64_t test_svtmad_f64(svfloat64_t op1, svfloat64_t op2)
+{
+  return svtmad_f64(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
+
+svfloat64_t test_svtmad_f64_2(svfloat64_t op1, svfloat64_t op2)
+{
+  return svtmad_f64(op1, op2, 8); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_ext.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_ext.c
@@ -0,0 +1,81 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+//
+// ext
+//
+
+svint8_t test_svext_s8(svint8_t op1, svint8_t op2)
+{
+  return svext_s8(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 255]}}
+}
+
+svint8_t test_svext_s8_2(svint8_t op1, svint8_t op2)
+{
+  return svext_s8(op1, op2, 256); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 255]}}
+}
+
+svint16_t test_svext_s16(svint16_t op1, svint16_t op2)
+{
+  return svext_s16(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 127]}}
+}
+
+svint16_t test_svext_s16_2(svint16_t op1, svint16_t op2)
+{
+  return svext_s16(op1, op2, 128); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 127]}}
+}
+
+svint32_t test_svext_s32(svint32_t op1, svint32_t op2)
+{
+  return svext_s32(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 63]}}
+}
+
+svint32_t test_svext_s32_2(svint32_t op1, svint32_t op2)
+{
+  return svext_s32(op1, op2, 64); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 63]}}
+}
+
+svint64_t test_svext_s64(svint64_t op1, svint64_t op2)
+{
+  return svext_s64(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+}
+
+svint64_t test_svext_s64_2(svint64_t op1, svint64_t op2)
+{
+  return svext_s64(op1, op2, 32); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+}
+
+svuint8_t test_svext_u8(svuint8_t op1, svuint8_t op2)
+{
+  return svext_u8(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 255]}}
+}
+
+svuint16_t test_svext_u16(svuint16_t op1, svuint16_t op2)
+{
+  return svext_u16(op1, op2, 128); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 127]}}
+}
+
+svuint32_t test_svext_u32(svuint32_t op1, svuint32_t op2)
+{
+  return svext_u32(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 63]}}
+}
+
+svuint64_t test_svext_u64(svuint64_t op1, svuint64_t op2)
+{
+  return svext_u64(op1, op2, 32); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+}
+
+svfloat16_t test_svext_f16(svfloat16_t op1, svfloat16_t op2)
+{
+  return svext_f16(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 127]}}
+}
+
+svfloat32_t test_svext_f32(svfloat32_t op1, svfloat32_t op2)
+{
+  return svext_f32(op1, op2, 64); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 63]}}
+}
+
+svfloat64_t test_svext_f64(svfloat64_t op1, svfloat64_t op2)
+{
+  return svext_f64(op1, op2, -1); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_asrd.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_asrd.c
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -D__ARM_FEATURE_SVE %s
+
+#include <arm_sve.h>
+//
+// asrd
+//
+svint8_t test_svasrd_n_s8_m(svbool_t pg, svint8_t op1)
+{
+  return svasrd_n_s8_m(pg, op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+}
+
+svint16_t test_svasrd_n_s16_m(svbool_t pg, svint16_t op1)
+{
+  return svasrd_n_s16_m(pg, op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+}
+
+svint32_t test_svasrd_n_s32_m(svbool_t pg, svint32_t op1)
+{
+  return svasrd_n_s32_m(pg, op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+}
+
+svint64_t test_svasrd_n_s64_m(svbool_t pg, svint64_t op1)
+{
+  return svasrd_n_s64_m(pg, op1, 0); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 64]}}
+}
+
+svint8_t test_svasrd_n_s8_max_m(svbool_t pg, svint8_t op1)
+{
+  return svasrd_n_s8_m(pg, op1, 9); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+}
+
+svint16_t test_svasrd_n_s16_max_m(svbool_t pg, svint16_t op1)
+{
+  return svasrd_n_s16_m(pg, op1, 17); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+}
+
+svint32_t test_svasrd_n_s32_max_m(svbool_t pg, svint32_t op1)
+{
+  return svasrd_n_s32_m(pg, op1, 33); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+}
+
+svint64_t test_svasrd_n_s64_max_m(svbool_t pg, svint64_t op1)
+{
+  return svasrd_n_s64_m(pg, op1, 65); // expected-error-re {{argument value {{[0-9]+}} is outside the valid range [1, 64]}}
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tmad_shortform.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tmad_shortform.c
@@ -0,0 +1,51 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svfloat16_t test_svtmad_f16(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ftmad.x.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 0)
+  // CHECK: ret
+  return svtmad(op1, op2, 0);
+}
+
+svfloat16_t test_svtmad_f16_7(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f16_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ftmad.x.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 7)
+  // CHECK: ret
+  return svtmad(op1, op2, 7);
+}
+
+svfloat32_t test_svtmad_f32(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ftmad.x.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 0)
+  // CHECK: ret
+  return svtmad(op1, op2, 0);
+}
+
+svfloat32_t test_svtmad_f32_7(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f32_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ftmad.x.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 7)
+  // CHECK: ret
+  return svtmad(op1, op2, 7);
+}
+
+svfloat64_t test_svtmad_f64(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ftmad.x.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 0)
+  // CHECK: ret
+  return svtmad(op1, op2, 0);
+}
+
+svfloat64_t test_svtmad_f64_7(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f64_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ftmad.x.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 7)
+  // CHECK: ret
+  return svtmad(op1, op2, 7);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tmad.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tmad.c
@@ -0,0 +1,51 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svfloat16_t test_svtmad_f16(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ftmad.x.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 0)
+  // CHECK: ret
+  return svtmad_f16(op1, op2, 0);
+}
+
+svfloat16_t test_svtmad_f16_7(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f16_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ftmad.x.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 7)
+  // CHECK: ret
+  return svtmad_f16(op1, op2, 7);
+}
+
+svfloat32_t test_svtmad_f32(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ftmad.x.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 0)
+  // CHECK: ret
+  return svtmad_f32(op1, op2, 0);
+}
+
+svfloat32_t test_svtmad_f32_7(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f32_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ftmad.x.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 7)
+  // CHECK: ret
+  return svtmad_f32(op1, op2, 7);
+}
+
+svfloat64_t test_svtmad_f64(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ftmad.x.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 0)
+  // CHECK: ret
+  return svtmad_f64(op1, op2, 0);
+}
+
+svfloat64_t test_svtmad_f64_7(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svtmad_f64_7
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ftmad.x.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 7)
+  // CHECK: ret
+  return svtmad_f64(op1, op2, 7);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext_shortform.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext_shortform.c
@@ -0,0 +1,179 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8_t test_svext_s8(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svext_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svint8_t test_svext_s8_255(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svext_s8_255
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 255)
+  // CHECK: ret
+  return svext(op1, op2, 255);
+}
+
+svint16_t test_svext_s16(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svext_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svint16_t test_svext_s16_127(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svext_s16_127
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 127)
+  // CHECK: ret
+  return svext(op1, op2, 127);
+}
+
+svint32_t test_svext_s32(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svext_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svint32_t test_svext_s32_63(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svext_s32_63
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 63)
+  // CHECK: ret
+  return svext(op1, op2, 63);
+}
+
+svint64_t test_svext_s64(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svext_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svint64_t test_svext_s64_31(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svext_s64_31
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 31)
+  // CHECK: ret
+  return svext(op1, op2, 31);
+}
+
+svuint8_t test_svext_u8(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svext_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svuint8_t test_svext_u8_255(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svext_u8_255
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 255)
+  // CHECK: ret
+  return svext(op1, op2, 255);
+}
+
+svuint16_t test_svext_u16(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svext_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svuint16_t test_svext_u16_127(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svext_u16_127
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 127)
+  // CHECK: ret
+  return svext(op1, op2, 127);
+}
+
+svuint32_t test_svext_u32(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svext_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svuint32_t test_svext_u32_63(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svext_u32_63
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 63)
+  // CHECK: ret
+  return svext(op1, op2, 63);
+}
+
+svuint64_t test_svext_u64(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svext_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svuint64_t test_svext_u64_31(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svext_u64_31
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 31)
+  // CHECK: ret
+  return svext(op1, op2, 31);
+}
+
+svfloat16_t test_svext_f16(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svext_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svfloat16_t test_svext_f16_127(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svext_f16_127
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 127)
+  // CHECK: ret
+  return svext(op1, op2, 127);
+}
+
+svfloat32_t test_svext_f32(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svext_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svfloat32_t test_svext_f32_63(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svext_f32_63
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 63)
+  // CHECK: ret
+  return svext(op1, op2, 63);
+}
+
+svfloat64_t test_svext_f64(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svext_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 0)
+  // CHECK: ret
+  return svext(op1, op2, 0);
+}
+
+svfloat64_t test_svext_f64_31(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svext_f64_31
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 31)
+  // CHECK: ret
+  return svext(op1, op2, 31);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ext.c
@@ -0,0 +1,179 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8_t test_svext_s8(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svext_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0)
+  // CHECK: ret
+  return svext_s8(op1, op2, 0);
+}
+
+svint8_t test_svext_s8_255(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svext_s8_255
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 255)
+  // CHECK: ret
+  return svext_s8(op1, op2, 255);
+}
+
+svint16_t test_svext_s16(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svext_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret
+  return svext_s16(op1, op2, 0);
+}
+
+svint16_t test_svext_s16_127(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svext_s16_127
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 127)
+  // CHECK: ret
+  return svext_s16(op1, op2, 127);
+}
+
+svint32_t test_svext_s32(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svext_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret
+  return svext_s32(op1, op2, 0);
+}
+
+svint32_t test_svext_s32_63(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svext_s32_63
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 63)
+  // CHECK: ret
+  return svext_s32(op1, op2, 63);
+}
+
+svint64_t test_svext_s64(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svext_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
+  // CHECK: ret
+  return svext_s64(op1, op2, 0);
+}
+
+svint64_t test_svext_s64_31(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svext_s64_31
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 31)
+  // CHECK: ret
+  return svext_s64(op1, op2, 31);
+}
+
+svuint8_t test_svext_u8(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svext_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0)
+  // CHECK: ret
+  return svext_u8(op1, op2, 0);
+}
+
+svuint8_t test_svext_u8_255(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svext_u8_255
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 255)
+  // CHECK: ret
+  return svext_u8(op1, op2, 255);
+}
+
+svuint16_t test_svext_u16(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svext_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret
+  return svext_u16(op1, op2, 0);
+}
+
+svuint16_t test_svext_u16_127(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svext_u16_127
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 127)
+  // CHECK: ret
+  return svext_u16(op1, op2, 127);
+}
+
+svuint32_t test_svext_u32(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svext_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret
+  return svext_u32(op1, op2, 0);
+}
+
+svuint32_t test_svext_u32_63(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svext_u32_63
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 63)
+  // CHECK: ret
+  return svext_u32(op1, op2, 63);
+}
+
+svuint64_t test_svext_u64(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svext_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
+  // CHECK: ret
+  return svext_u64(op1, op2, 0);
+}
+
+svuint64_t test_svext_u64_31(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svext_u64_31
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 31)
+  // CHECK: ret
+  return svext_u64(op1, op2, 31);
+}
+
+svfloat16_t test_svext_f16(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svext_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 0)
+  // CHECK: ret
+  return svext_f16(op1, op2, 0);
+}
+
+svfloat16_t test_svext_f16_127(svfloat16_t op1, svfloat16_t op2)
+{
+  // CHECK-LABEL: test_svext_f16_127
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2, i32 127)
+  // CHECK: ret
+  return svext_f16(op1, op2, 127);
+}
+
+svfloat32_t test_svext_f32(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svext_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 0)
+  // CHECK: ret
+  return svext_f32(op1, op2, 0);
+}
+
+svfloat32_t test_svext_f32_63(svfloat32_t op1, svfloat32_t op2)
+{
+  // CHECK-LABEL: test_svext_f32_63
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2, i32 63)
+  // CHECK: ret
+  return svext_f32(op1, op2, 63);
+}
+
+svfloat64_t test_svext_f64(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svext_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 0)
+  // CHECK: ret
+  return svext_f64(op1, op2, 0);
+}
+
+svfloat64_t test_svext_f64_31(svfloat64_t op1, svfloat64_t op2)
+{
+  // CHECK-LABEL: test_svext_f64_31
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, i32 31)
+  // CHECK: ret
+  return svext_f64(op1, op2, 31);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_asrd_shortform.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_asrd_shortform.c
@@ -0,0 +1,73 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8_t test_svasrd_n_s8_m(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s8_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 1);
+}
+
+svint16_t test_svasrd_n_s16_m(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s16_m
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 1);
+}
+
+svint32_t test_svasrd_n_s32_m(svbool_t pg, svint32_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s32_m
+  // CHECK: %[[P0:.*]] = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %[[P0]], <vscale x 4 x i32> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 1);
+}
+
+svint64_t test_svasrd_n_s64_m(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s64_m
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 1);
+}
+
+svint8_t test_svasrd_n_s8_max_m(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s8_max_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 8)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 8);
+}
+
+svint16_t test_svasrd_n_s16_max_m(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s16_max_m
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 16)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 16);
+}
+
+svint32_t test_svasrd_n_s32_max_m(svbool_t pg, svint32_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s32_max_m
+  // CHECK: %[[P0:.*]] = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %[[P0]], <vscale x 4 x i32> %op1, i32 32)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 32);
+}
+
+svint64_t test_svasrd_n_s64_max_m(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s64_max_m
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 64)
+  // CHECK: ret
+  return svasrd_m(pg, op1, 64);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_asrd.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_asrd.c
@@ -0,0 +1,73 @@
+// RUN:  %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o -  -D__ARM_FEATURE_SVE %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8_t test_svasrd_n_s8_m(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s8_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_n_s8_m(pg, op1, 1);
+}
+
+svint16_t test_svasrd_n_s16_m(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s16_m
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_n_s16_m(pg, op1, 1);
+}
+
+svint32_t test_svasrd_n_s32_m(svbool_t pg, svint32_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s32_m
+  // CHECK: %[[P0:.*]] = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %[[P0]], <vscale x 4 x i32> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_n_s32_m(pg, op1, 1);
+}
+
+svint64_t test_svasrd_n_s64_m(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s64_m
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 1)
+  // CHECK: ret
+  return svasrd_n_s64_m(pg, op1, 1);
+}
+
+svint8_t test_svasrd_n_s8_max_m(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s8_max_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 8)
+  // CHECK: ret
+  return svasrd_n_s8_m(pg, op1, 8);
+}
+
+svint16_t test_svasrd_n_s16_max_m(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s16_max_m
+  // CHECK: %[[P0:.*]] = call <vscale x 8 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %[[P0]], <vscale x 8 x i16> %op1, i32 16)
+  // CHECK: ret
+  return svasrd_n_s16_m(pg, op1, 16);
+}
+
+svint32_t test_svasrd_n_s32_max_m(svbool_t pg, svint32_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s32_max_m
+  // CHECK: %[[P0:.*]] = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %[[P0]], <vscale x 4 x i32> %op1, i32 32)
+  // CHECK: ret
+  return svasrd_n_s32_m(pg, op1, 32);
+}
+
+svint64_t test_svasrd_n_s64_max_m(svbool_t pg, svint64_t op1)
+{
+  // CHECK-LABEL: test_svasrd_n_s64_max_m
+  // CHECK: %[[P0:.*]] = call <vscale x 2 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %[[P0]], <vscale x 2 x i64> %op1, i32 64)
+  // CHECK: ret
+  return svasrd_n_s64_m(pg, op1, 64);
+}
Index: clang/lib/Sema/SemaChecking.cpp
===================================================================
--- clang/lib/Sema/SemaChecking.cpp
+++ clang/lib/Sema/SemaChecking.cpp
@@ -2020,6 +2020,29 @@
       if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
         HasError = true;
       break;
+    case SVETypeFlags::ImmCheck0_7:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckExtract:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                      (2048 / ElementSizeInBits) - 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftRight:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftRightNarrow:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
+                                      ElementSizeInBits / 2))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftLeft:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                      ElementSizeInBits - 1))
+        HasError = true;
+      break;
     }
   }
 
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -7570,6 +7570,15 @@
   else if (Builtin->LLVMIntrinsic != 0) {
     llvm::Type* OverloadedTy = getSVEType(TypeFlags);
 
+    // Predicates must match the main datatype.
+    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+      if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
+        if (PredTy->getScalarType()->isIntegerTy(1)) {
+          auto NewPredTy = cast<llvm::VectorType>(OverloadedTy);
+          Ops[i] = EmitSVEPredicateCast(Ops[i], NewPredTy);
+        }
+    }
+
     Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, OverloadedTy);
     Value *Call = Builder.CreateCall(F, Ops);
 		return Call;
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -58,9 +58,11 @@
 // -------------------
 // prototype: return (arg, arg, ...)
 //
+// u: vector of unsigned integers
 // d: default
 // c: const pointer type
 // P: predicate type
+// h: 1/2 width elements, 2x element count
 //
 // i: constant uint64_t
 //
@@ -152,14 +154,18 @@
 }
 def ImmCheckPredicatePattern    : ImmCheckType<0>;  // 0..31
 def ImmCheck1_16                : ImmCheckType<1>;  // 1..16
+def ImmCheckExtract             : ImmCheckType<2>;  // 0..(2048/sizeinbits(elt) - 1)
+def ImmCheckShiftRight          : ImmCheckType<3>;  // 1..sizeinbits(elt)
+def ImmCheckShiftRightNarrow    : ImmCheckType<4>;  // 1..sizeinbits(elt)/2
+def ImmCheckShiftLeft           : ImmCheckType<5>;  // 0..(sizeinbits(elt) - 1)
+def ImmCheck0_7                 : ImmCheckType<6>;  // 0..7
 
 class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
   int Arg = arg;
-	int EltSizeArg = eltSizeArg;
+  int EltSizeArg = eltSizeArg;
   ImmCheckType Kind = kind;
 }
 
-// Every intrinsic subclasses Inst.
 class Inst<string n, string p, string t, MergeType mt, string i,
            list<FlagType> ft, list<ImmCheck> ch, MemEltType met> {
   string Name = n;
@@ -276,6 +282,30 @@
 // Store one vector, with no truncation, non-temporal (scalar base, VL displacement)
 def SVSTNT1_VNUM : MInst<"svstnt1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
 
+////////////////////////////////////////////////////////////////////////////////
+// Permutations and selection
+def SVEXT        : SInst<"svext[_{d}]",       "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Shifts
+def SVASRD_M : SInst<"svasrd[_n_{d}]", "dPdi", "csil",            MergeOp1,  "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Narrowing DSP operations
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVSHRNB      : SInst<"svshrnb[_n_{d}]",    "hdi",  "silUsUiUl", MergeNone, "aarch64_sve_shrnb",     [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Uniform DSP operations
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVQSHLU_M  : SInst<"svqshlu[_n_{d}]", "uPdi", "csil",         MergeOp1,  "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft,  1>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point arithmetic
+def SVTMAD  : SInst<"svtmad[_{d}]",  "dddi", "hfd", MergeNone, "aarch64_sve_ftmad_x", [], [ImmCheck<2, ImmCheck0_7>]>;
+
 ////////////////////////////////////////////////////////////////////////////////
 // Saturating scalar arithmetic
 def SVQDECH_S : SInst<"svqdech_pat[_{d}]",   "ddIi", "s", MergeNone, "aarch64_sve_sqdech", [], [ImmCheck<2, ImmCheck1_16>]>;
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to