Author: Zakk Chen
Date: 2022-08-02T17:27:55Z
New Revision: 7eddeb9e99f36d907295a2ed75244d39e1e41e33

URL: https://github.com/llvm/llvm-project/commit/7eddeb9e99f36d907295a2ed75244d39e1e41e33
DIFF: https://github.com/llvm/llvm-project/commit/7eddeb9e99f36d907295a2ed75244d39e1e41e33.diff

LOG: [RISCV][Clang] Support policy functions for vmerge, vfmerge and vcompress.

We will switch all UndefValue uses to PoisonValue in follow-up patches.
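
For context, the policy variants added here look like this in user code. A
minimal usage sketch, with signatures taken from the tests added below (note
that these non-overloaded _tu/_ta names belong to this era of the intrinsics
and were renamed in later releases):

  #include <riscv_vector.h>
  #include <stddef.h>

  // Tail-undisturbed: tail elements are taken from the merge operand.
  vint32mf2_t demo_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1,
                      vint32mf2_t op2, size_t vl) {
    return vmerge_vvm_i32mf2_tu(mask, merge, op1, op2, vl);
  }

  // Tail-agnostic: the merge operand is dropped; codegen supplies an undef
  // passthru instead.
  vint32mf2_t demo_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2,
                      size_t vl) {
    return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl);
  }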

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D126745

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/include/clang/Support/RISCVVIntrinsicUtils.h
    clang/lib/Support/RISCVVIntrinsicUtils.cpp
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index a36ff62231581..75d2f607b1cff 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -137,6 +137,8 @@ class PolicyScheme<int val>{
 def NonePolicy : PolicyScheme<0>;
 def HasPassthruOperand : PolicyScheme<1>;
 def HasPolicyOperand : PolicyScheme<2>;
+// Special case for a passthru operand that is not the first operand.
+def HasPassthruOperandAtIdx1 : PolicyScheme<3>;
 
 class RVVBuiltin<string suffix, string prototype, string type_range,
                  string overloaded_suffix = ""> {
@@ -1819,13 +1821,16 @@ defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
 }
 
 // 12.15. Vector Integer Merge Instructions
-// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMasked = false, MaskedPolicyScheme = NonePolicy,
+// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
+let HasMasked = false,
+    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
-      IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert undef passthru
-      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      if (DefaultPolicy == TAIL_AGNOSTIC)
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
     }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
                                     [["vvm", "v", "vmvv"],
@@ -1960,12 +1965,15 @@ let Name = "vfclass_v", UnMaskedPolicyScheme = HasPassthruOperand in
 
 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMasked = false, MaskedPolicyScheme = NonePolicy,
+let HasMasked = false,
+    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
-      IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert undef passthru
-      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      if (DefaultPolicy == TAIL_AGNOSTIC)
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
     }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
                                     [["vvm", "v", "vmvv"]]>;
@@ -2152,10 +2160,16 @@ defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
 }
 
 // 17.5. Vector Compress Instruction
-let HasMasked = false, MaskedPolicyScheme = NonePolicy,
+let IsPrototypeDefaultTU = true,
+    HasMasked = false,
+    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
-      IntrinsicTypes = {ResultType, Ops[3]->getType()};
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      // insert undef passthru
+      if (DefaultPolicy == TAIL_AGNOSTIC)
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
     }] in {
   // signed and floating type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
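
Note the asymmetry with vmerge above: vcompress sets IsPrototypeDefaultTU,
meaning its unsuffixed prototype already carries the dest operand, so here it
is the TA variant whose prototype gets trimmed. The resulting C-level
signatures (as exposed via <riscv_vector.h>), mirroring the tests added below:

  // TU form keeps the merge operand from the existing prototype:
  vint32mf2_t vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge,
                                     vint32mf2_t src, size_t vl);
  // TA form drops it; codegen supplies an undef passthru instead:
  vint32mf2_t vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src,
                                     size_t vl);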

diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 756d8d13d6986..4a8bbc78cdee3 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -290,8 +290,12 @@ class RVVType {
 
 enum PolicyScheme : uint8_t {
   SchemeNone,
+  // The passthru operand is the first parameter in the C builtin.
   HasPassthruOperand,
   HasPolicyOperand,
+  // Special case for vmerge: the passthru operand is the second
+  // parameter in the C builtin.
+  HasPassthruOperandAtIdx1,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic
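
To spell out what the new enumerator encodes, here is a hypothetical
standalone helper (not part of this patch or of RISCVVIntrinsicUtils) mapping
each scheme to the passthru's position among the C builtin's parameters:

  #include <cstdio>
  #include <optional>

  // Standalone copy of the enum above, for illustration only.
  enum PolicyScheme : unsigned char {
    SchemeNone,
    HasPassthruOperand,       // passthru is the first C parameter
    HasPolicyOperand,         // policy is passed as an extra trailing operand
    HasPassthruOperandAtIdx1, // vmerge-style: passthru is the second parameter
  };

  // Hypothetical helper: index of the passthru among the C builtin's
  // parameters, or std::nullopt when the scheme carries no passthru.
  static std::optional<unsigned> passthruParamIndex(PolicyScheme S) {
    switch (S) {
    case HasPassthruOperand:       return 0;
    case HasPassthruOperandAtIdx1: return 1;
    default:                       return std::nullopt;
    }
  }

  int main() {
    if (auto Idx = passthruParamIndex(HasPassthruOperandAtIdx1))
      std::printf("passthru at parameter %u\n", *Idx); // parameter 1
  }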

diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 6d025cd95238f..bf81ac60dae3e 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -973,6 +973,15 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
     else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
              IsPrototypeDefaultTU)
       NewPrototype.erase(NewPrototype.begin() + 1);
+    if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
+      if (DefaultPolicy == Policy::TU && !IsPrototypeDefaultTU) {
+        // Insert the undisturbed passthru at parameter index 1.
+        NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
+      } else if (DefaultPolicy == Policy::TA && IsPrototypeDefaultTU) {
+        // Erase the passthru for the TA policy.
+        NewPrototype.erase(NewPrototype.begin() + 2);
+      }
+    }
   }
 
   // If HasVL, append PrototypeDescriptor:VL to last operand
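
One subtlety in the hunk above: NewPrototype stores the return type at index
0 and the parameters after it, so "parameter index 1" is reached via
begin() + 2, and the inserted descriptor is NewPrototype[0] because the
passthru must have the result's type. A self-contained trace of both
branches, using strings in place of PrototypeDescriptors:

  #include <cassert>
  #include <string>
  #include <vector>

  int main() {
    // Index 0 of the prototype is the return type; parameters follow, so
    // "parameter index 1" is vector index 2.
    std::vector<std::string> Proto = {"vret", "mask", "op1", "op2"};
    // TU with a TA-default prototype (vmerge/vfmerge): insert a passthru
    // with the same type as the result at parameter index 1.
    std::string Ret = Proto[0]; // copied first for clarity
    Proto.insert(Proto.begin() + 2, Ret);
    assert(Proto[2] == "vret"); // now: vret, mask, vret, op1, op2
    // TA with a TU-default prototype (vcompress): erase that same slot.
    Proto.erase(Proto.begin() + 2);
    assert(Proto.size() == 4);  // back to: vret, mask, op1, op2
    return 0;
  }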

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
index bd787d9b1263f..729fb38beeb9e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
@@ -482,4 +482,56 @@ vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, 
vfloat64m8_t dest, vfloat64
   return vcompress(mask, dest, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 
x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, 
vint32mf2_t src, size_t vl) {
+  return vcompress_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 
x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, 
vuint32mf2_t src, size_t vl) {
+  return vcompress_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 
1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, 
vfloat32mf2_t src, size_t vl) {
+  return vcompress_tu(mask, merge, src, vl);
+}
 
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, 
size_t vl) {
+  return vcompress_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, 
size_t vl) {
+  return vcompress_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x 
float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, 
size_t vl) {
+  return vcompress_ta(mask, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
index 96e6282f1dba2..2a76c46e34480 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -94,3 +94,21 @@ vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, 
vfloat64m8_t op1, double op2,
                                     size_t vl) {
   return vfmerge(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale 
x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], 
i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, 
vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x 
float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, 
float op2, size_t vl) {
+  return vfmerge_ta(mask, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
index 9e0588a2adb84..275801e2647ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -974,3 +974,93 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, 
vfloat64m8_t op1,
                                    vfloat64m8_t op2, size_t vl) {
   return vmerge(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], 
<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x 
i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, 
vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 
1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, 
vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], 
<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x 
i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, 
vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 
1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, 
vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x 
i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, 
vint32mf2_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t 
op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x 
i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, 
uint32_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], 
<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 
x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, 
vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 
x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, 
vfloat32mf2_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
index e1950ce4c7e10..6bf205cb504d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
@@ -536,3 +536,57 @@ vfloat16m4_t test_vcompress_vm_f16m4 (vbool4_t mask, 
vfloat16m4_t dest, vfloat16
 vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, 
vfloat16m8_t src, size_t vl) {
   return vcompress_vm_f16m8(mask, dest, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 
x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, 
vint32mf2_t src, size_t vl) {
+  return vcompress_vm_i32mf2_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 
x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, 
vuint32mf2_t src, size_t vl) {
+  return vcompress_vm_u32mf2_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 
1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, 
vfloat32mf2_t src, size_t vl) {
+  return vcompress_vm_f32mf2_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, 
size_t vl) {
+  return vcompress_vm_i32mf2_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, 
size_t vl) {
+  return vcompress_vm_u32mf2_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x 
float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, 
size_t vl) {
+  return vcompress_vm_f32mf2_ta(mask, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
index bcb3b8d424229..cdf4524df2e50 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
@@ -101,7 +101,7 @@ vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, 
vfloat64m8_t op1, double op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> 
@llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x 
half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, 
_Float16 op2, size_t vl) {
+vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, 
_Float16 op2, size_t vl) {
   return vfmerge_vfm_f16mf4(mask, op1, op2, vl);
 }
 
@@ -110,7 +110,7 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vbool64_t mask, 
vfloat16mf4_t op1, _Float
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> 
@llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x 
half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, 
_Float16 op2, size_t vl) {
+vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, 
_Float16 op2, size_t vl) {
   return vfmerge_vfm_f16mf2(mask, op1, op2, vl);
 }
 
@@ -119,7 +119,7 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vbool32_t mask, 
vfloat16mf2_t op1, _Float
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> 
@llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x 
half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1 (vbool16_t mask, vfloat16m1_t op1, 
_Float16 op2, size_t vl) {
+vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 
op2, size_t vl) {
   return vfmerge_vfm_f16m1(mask, op1, op2, vl);
 }
 
@@ -128,7 +128,7 @@ vfloat16m1_t test_vfmerge_vfm_f16m1 (vbool16_t mask, 
vfloat16m1_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> 
@llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x 
half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2 (vbool8_t mask, vfloat16m2_t op1, _Float16 
op2, size_t vl) {
+vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 
op2, size_t vl) {
   return vfmerge_vfm_f16m2(mask, op1, op2, vl);
 }
 
@@ -137,7 +137,7 @@ vfloat16m2_t test_vfmerge_vfm_f16m2 (vbool8_t mask, 
vfloat16m2_t op1, _Float16 o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> 
@llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x 
half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4 (vbool4_t mask, vfloat16m4_t op1, _Float16 
op2, size_t vl) {
+vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 
op2, size_t vl) {
   return vfmerge_vfm_f16m4(mask, op1, op2, vl);
 }
 
@@ -146,6 +146,24 @@ vfloat16m4_t test_vfmerge_vfm_f16m4 (vbool4_t mask, 
vfloat16m4_t op1, _Float16 o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> 
@llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x 
half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8 (vbool2_t mask, vfloat16m8_t op1, _Float16 
op2, size_t vl) {
+vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 
op2, size_t vl) {
   return vfmerge_vfm_f16m8(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale 
x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], 
i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, 
vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfmerge_vfm_f32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x 
float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, 
float op2, size_t vl) {
+  return vfmerge_vfm_f32mf2_ta(mask, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
index 37d8cba30b4e8..69958f38c7aab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
@@ -981,7 +981,7 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, 
vfloat64m8_t op1,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> 
@llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x 
half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) {
+vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) {
   return vmerge_vvm_f16mf4(mask, op1, op2, vl);
 }
 
@@ -990,7 +990,7 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4 (vbool64_t mask, 
vfloat16mf4_t op1, vfloat1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> 
@llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x 
half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, 
vfloat16mf2_t op2, size_t vl) {
+vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, 
vfloat16mf2_t op2, size_t vl) {
   return vmerge_vvm_f16mf2(mask, op1, op2, vl);
 }
 
@@ -999,7 +999,7 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2 (vbool32_t mask, 
vfloat16mf2_t op1, vfloat1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> 
@llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x 
half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1 (vbool16_t mask, vfloat16m1_t op1, 
vfloat16m1_t op2, size_t vl) {
+vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, 
vfloat16m1_t op2, size_t vl) {
   return vmerge_vvm_f16m1(mask, op1, op2, vl);
 }
 
@@ -1008,7 +1008,7 @@ vfloat16m1_t test_vmerge_vvm_f16m1 (vbool16_t mask, 
vfloat16m1_t op1, vfloat16m1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> 
@llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x 
half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2 (vbool8_t mask, vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) {
+vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) {
   return vmerge_vvm_f16m2(mask, op1, op2, vl);
 }
 
@@ -1017,7 +1017,7 @@ vfloat16m2_t test_vmerge_vvm_f16m2 (vbool8_t mask, 
vfloat16m2_t op1, vfloat16m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> 
@llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 
16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4 (vbool4_t mask, vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) {
+vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) {
   return vmerge_vvm_f16m4(mask, op1, op2, vl);
 }
 
@@ -1026,6 +1026,96 @@ vfloat16m4_t test_vmerge_vvm_f16m4 (vbool4_t mask, 
vfloat16m4_t op1, vfloat16m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> 
@llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 
32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8 (vbool2_t mask, vfloat16m8_t op1, 
vfloat16m8_t op2, size_t vl) {
+vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, 
vfloat16m8_t op2, size_t vl) {
   return vmerge_vvm_f16m8(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], 
<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x 
i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, 
vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_i32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 
1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, 
vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmerge_vxm_i32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], 
<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x 
i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, 
vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_u32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 
1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, 
vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmerge_vxm_u32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x 
i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, 
vint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t 
op2, size_t vl) {
+  return vmerge_vxm_i32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x 
i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_u32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> 
@llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> 
[[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, 
uint32_t op2, size_t vl) {
+  return vmerge_vxm_u32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], 
<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 
x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, 
vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmerge_vvm_f32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> 
@llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 
x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, 
vfloat32mf2_t op2, size_t vl) {
+  return vmerge_vvm_f32mf2_ta(mask, op1, op2, vl);
+}

