Author: Fraser Cormack
Date: 2021-01-19T09:30:36Z
New Revision: c81ea9429f8d0f4e4f7a8b3ccf29b63f4810102b
URL: https://github.com/llvm/llvm-project/commit/c81ea9429f8d0f4e4f7a8b3ccf29b63f4810102b
DIFF: https://github.com/llvm/llvm-project/commit/c81ea9429f8d0f4e4f7a8b3ccf29b63f4810102b.diff

LOG: [RISCV] Add scalable-vector integer extension patterns

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94694

Added: 
    llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    
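The new VPatExtendSDNode_V multiclass in the patch below walks a list of
(Vti, Fti) pairs — each vector type together with the fractional-LMUL type it
can be extended from — and emits one ISel pattern per extension opcode. As a
rough sketch, the sext instantiation for nxv1i8 -> nxv1i16 should expand to
something like the following; the MF4 mnemonic, the VR register class and the
literal SEW of 16 are assumptions derived from reading the multiclass, not
text from the patch:

    // Illustrative expansion: vti = nxv1i16 (SEW=16, LMUL=mf4), fti = nxv1i8.
    // "PseudoVSEXT_VF2_MF4" is inst_name # "_" # suffix # "_" # vti.LMul.MX;
    // the concrete names here are assumptions, not code from the commit.
    def : Pat<(nxv1i16 (sext (nxv1i8 VR:$rs2))),
              (PseudoVSEXT_VF2_MF4 VR:$rs2, VLMax, 16)>;

Both zext and anyext are handed to the PseudoVZEXT pseudos, so an any-extended
vector is selected as a zero-extension, while sext gets PseudoVSEXT.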
################################################################################

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 6ce6c16d4405..5b4051c534a3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -275,6 +275,18 @@ multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
   defm : VPatFPSetCCSDNode_FV<cc, swapped_op_inst_name>;
 }
 
+multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
+                              list<VTypeInfoToFraction> fraction_list> {
+  foreach vtiTofti = fraction_list in {
+    defvar vti = vtiTofti.Vti;
+    defvar fti = vtiTofti.Fti;
+    foreach op = ops in
+      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
+                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
+                    fti.RegClass:$rs2, VLMax, vti.SEW)>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -301,6 +313,20 @@ foreach vti = AllIntegerVectors in {
                            vti.RegClass:$rs1, simm5:$rs2, VLMax, vti.SEW)>;
 }
 
+// 12.3. Vector Integer Extension
+defm "" : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
+                             AllFractionableVF2IntVectors>;
+defm "" : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
+                             AllFractionableVF2IntVectors>;
+defm "" : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
+                             AllFractionableVF4IntVectors>;
+defm "" : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
+                             AllFractionableVF4IntVectors>;
+defm "" : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
+                             AllFractionableVF8IntVectors>;
+defm "" : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
+                             AllFractionableVF8IntVectors>;
+
 // 12.5. Vector Bitwise Logical Instructions
 defm "" : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
 defm "" : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;

diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll
new file mode 100644
index 000000000000..4d5add7deb49
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll
@@ -0,0 +1,619 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i16> @vsext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsext_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %evec
+}
+
+define <vscale x 1 x i16> @vzext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vzext_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %evec
+}
+
+define <vscale x 1 x i32> @vsext_nxv1i8_nxv1i32(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsext_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i32> @vzext_nxv1i8_nxv1i32(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vzext_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vzext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vsext_nxv1i8_nxv1i64(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsext_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsext.vf8 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vzext_nxv1i8_nxv1i64(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vzext_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vzext.vf8 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i16> @vsext_nxv2i8_nxv2i16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsext_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %evec
+}
+
+define <vscale x 2 x i16> @vzext_nxv2i8_nxv2i16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vzext_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %evec
+}
+
+define <vscale x 2 x i32> @vsext_nxv2i8_nxv2i32(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsext_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i32> @vzext_nxv2i8_nxv2i32(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vzext_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vzext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i64> @vsext_nxv2i8_nxv2i64(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsext_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsext.vf8 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vzext_nxv2i8_nxv2i64(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vzext_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vzext.vf8 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i16> @vsext_nxv4i8_nxv4i16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsext_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %evec
+}
+
+define <vscale x 4 x i16> @vzext_nxv4i8_nxv4i16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vzext_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %evec
+}
+
+define <vscale x 4 x i32> @vsext_nxv4i8_nxv4i32(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsext_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i32> @vzext_nxv4i8_nxv4i32(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vzext_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vzext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i64> @vsext_nxv4i8_nxv4i64(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsext_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsext.vf8 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vzext_nxv4i8_nxv4i64(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vzext_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vzext.vf8 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i16> @vsext_nxv8i8_nxv8i16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsext_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %evec
+}
+
+define <vscale x 8 x i16> @vzext_nxv8i8_nxv8i16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vzext_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %evec
+}
+
+define <vscale x 8 x i32> @vsext_nxv8i8_nxv8i32(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsext_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i32> @vzext_nxv8i8_nxv8i32(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vzext_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vzext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i64> @vsext_nxv8i8_nxv8i64(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsext.vf8 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vzext_nxv8i8_nxv8i64(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vzext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vzext.vf8 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 16 x i16> @vsext_nxv16i8_nxv16i16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsext_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %evec
+}
+
+define <vscale x 16 x i16> @vzext_nxv16i8_nxv16i16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vzext_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 16 x i8> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %evec
+}
+
+define <vscale x 16 x i32> @vsext_nxv16i8_nxv16i32(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsext_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 16 x i32> @vzext_nxv16i8_nxv16i32(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vzext_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vzext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 32 x i16> @vsext_nxv32i8_nxv32i16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vsext_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vsext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %evec
+}
+
+define <vscale x 32 x i16> @vzext_nxv32i8_nxv32i16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vzext_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vzext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 32 x i8> %va to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %evec
+}
+
+define <vscale x 1 x i32> @vsext_nxv1i16_nxv1i32(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsext_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i32> @vzext_nxv1i16_nxv1i32(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vzext_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vsext_nxv1i16_nxv1i64(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsext_nxv1i16_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vzext_nxv1i16_nxv1i64(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vzext_nxv1i16_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vzext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i32> @vsext_nxv2i16_nxv2i32(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsext_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i32> @vzext_nxv2i16_nxv2i32(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vzext_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i64> @vsext_nxv2i16_nxv2i64(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsext_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vzext_nxv2i16_nxv2i64(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vzext_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vzext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i32> @vsext_nxv4i16_nxv4i32(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsext_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i32> @vzext_nxv4i16_nxv4i32(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vzext_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i64> @vsext_nxv4i16_nxv4i64(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsext_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vzext_nxv4i16_nxv4i64(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vzext_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vzext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i32> @vsext_nxv8i16_nxv8i32(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsext_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i32> @vzext_nxv8i16_nxv8i32(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vzext_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i64> @vsext_nxv8i16_nxv8i64(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vzext_nxv8i16_nxv8i64(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vzext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vzext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 16 x i32> @vsext_nxv16i16_nxv16i32(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vsext_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 16 x i32> @vzext_nxv16i16_nxv16i32(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vzext_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vzext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vsext_nxv1i32_nxv1i64(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vsext_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vzext_nxv1i32_nxv1i64(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vzext_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vsext_nxv2i32_nxv2i64(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vsext_nxv2i32_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vzext_nxv2i32_nxv2i64(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vzext_nxv2i32_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vsext_nxv4i32_nxv4i64(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vsext_nxv4i32_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vzext_nxv4i32_nxv4i64(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vzext_nxv4i32_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vsext_nxv8i32_nxv8i64(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vzext_nxv8i32_nxv8i64(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vzext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vzext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
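The rv64 test file that follows is the twin of the rv32 one: the IR functions
and the expected machine code are identical, and only the triple in the RUN
line changes. A minimal sketch for exercising one of these tests by hand,
assuming a built llc and FileCheck on PATH and the usual llvm-project source
layout:

    $ llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
        < llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll \
        | FileCheck llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll

As the NOTE lines record, the CHECK assertions themselves are autogenerated by
utils/update_llc_test_checks.py rather than written by hand.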
diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll
new file mode 100644
index 000000000000..20df322a3aaa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll
@@ -0,0 +1,619 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i16> @vsext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsext_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %evec
+}
+
+define <vscale x 1 x i16> @vzext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vzext_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %evec
+}
+
+define <vscale x 1 x i32> @vsext_nxv1i8_nxv1i32(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsext_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i32> @vzext_nxv1i8_nxv1i32(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vzext_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vzext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vsext_nxv1i8_nxv1i64(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsext_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsext.vf8 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vzext_nxv1i8_nxv1i64(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vzext_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vzext.vf8 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i16> @vsext_nxv2i8_nxv2i16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsext_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %evec
+}
+
+define <vscale x 2 x i16> @vzext_nxv2i8_nxv2i16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vzext_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %evec
+}
+
+define <vscale x 2 x i32> @vsext_nxv2i8_nxv2i32(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsext_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i32> @vzext_nxv2i8_nxv2i32(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vzext_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vzext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i64> @vsext_nxv2i8_nxv2i64(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsext_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsext.vf8 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vzext_nxv2i8_nxv2i64(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vzext_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vzext.vf8 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i16> @vsext_nxv4i8_nxv4i16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsext_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %evec
+}
+
+define <vscale x 4 x i16> @vzext_nxv4i8_nxv4i16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vzext_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %evec
+}
+
+define <vscale x 4 x i32> @vsext_nxv4i8_nxv4i32(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsext_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i32> @vzext_nxv4i8_nxv4i32(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vzext_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vzext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i64> @vsext_nxv4i8_nxv4i64(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsext_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsext.vf8 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vzext_nxv4i8_nxv4i64(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vzext_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vzext.vf8 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i16> @vsext_nxv8i8_nxv8i16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsext_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %evec
+}
+
+define <vscale x 8 x i16> @vzext_nxv8i8_nxv8i16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vzext_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %evec
+}
+
+define <vscale x 8 x i32> @vsext_nxv8i8_nxv8i32(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsext_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i32> @vzext_nxv8i8_nxv8i32(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vzext_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vzext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i64> @vsext_nxv8i8_nxv8i64(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsext.vf8 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vzext_nxv8i8_nxv8i64(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vzext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vzext.vf8 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 16 x i16> @vsext_nxv16i8_nxv16i16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsext_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %evec
+}
+
+define <vscale x 16 x i16> @vzext_nxv16i8_nxv16i16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vzext_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 16 x i8> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %evec
+}
+
+define <vscale x 16 x i32> @vsext_nxv16i8_nxv16i32(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsext_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 16 x i32> @vzext_nxv16i8_nxv16i32(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vzext_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vzext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 32 x i16> @vsext_nxv32i8_nxv32i16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vsext_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vsext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %evec
+}
+
+define <vscale x 32 x i16> @vzext_nxv32i8_nxv32i16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vzext_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vzext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 32 x i8> %va to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %evec
+}
+
+define <vscale x 1 x i32> @vsext_nxv1i16_nxv1i32(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsext_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i32> @vzext_nxv1i16_nxv1i32(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vzext_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vsext_nxv1i16_nxv1i64(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsext_nxv1i16_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vzext_nxv1i16_nxv1i64(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vzext_nxv1i16_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vzext.vf4 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i32> @vsext_nxv2i16_nxv2i32(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsext_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i32> @vzext_nxv2i16_nxv2i32(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vzext_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i64> @vsext_nxv2i16_nxv2i64(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsext_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vzext_nxv2i16_nxv2i64(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vzext_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vzext.vf4 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i32> @vsext_nxv4i16_nxv4i32(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsext_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i32> @vzext_nxv4i16_nxv4i32(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vzext_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i64> @vsext_nxv4i16_nxv4i64(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsext_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vzext_nxv4i16_nxv4i64(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vzext_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vzext.vf4 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i32> @vsext_nxv8i16_nxv8i32(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsext_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i32> @vzext_nxv8i16_nxv8i32(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vzext_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i64> @vsext_nxv8i16_nxv8i64(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vzext_nxv8i16_nxv8i64(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vzext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vzext.vf4 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 16 x i32> @vsext_nxv16i16_nxv16i32(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vsext_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 16 x i32> @vzext_nxv16i16_nxv16i32(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vzext_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vzext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vsext_nxv1i32_nxv1i64(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vsext_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vzext_nxv1i32_nxv1i64(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vzext_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vzext.vf2 v25, v16
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vsext_nxv2i32_nxv2i64(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vsext_nxv2i32_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vzext_nxv2i32_nxv2i64(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vzext_nxv2i32_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vzext.vf2 v26, v16
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vsext_nxv4i32_nxv4i64(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vsext_nxv4i32_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vzext_nxv4i32_nxv4i64(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vzext_nxv4i32_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vzext.vf2 v28, v16
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vsext_nxv8i32_nxv8i64(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vzext_nxv8i32_nxv8i64(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vzext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vzext.vf2 v8, v16
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    ret
+  %evec = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits