Author: Fraser Cormack
Date: 2021-01-09T11:31:22Z
New Revision: 2c442629f0bd210fdb76fa409e131c87387e884d
URL: https://github.com/llvm/llvm-project/commit/2c442629f0bd210fdb76fa409e131c87387e884d
DIFF: https://github.com/llvm/llvm-project/commit/2c442629f0bd210fdb76fa409e131c87387e884d.diff

LOG: [RISCV] Add tests for scalable constant-folding (NFC)

Added:

Modified:
    llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll

Removed:

################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
index 98b30b565a3b..57006f5f30d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
@@ -37,6 +37,23 @@ define <vscale x 1 x i8> @vadd_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+; Test constant adds to see if we can optimize them away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vadd_ii_nxv1i8_1() {
+; CHECK-LABEL: vadd_ii_nxv1i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 2
+; CHECK-NEXT: vadd.vi v16, v25, 3
+; CHECK-NEXT: ret
+  %heada = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
+  %splata = shufflevector <vscale x 1 x i8> %heada, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %headb = insertelement <vscale x 1 x i8> undef, i8 3, i32 0
+  %splatb = shufflevector <vscale x 1 x i8> %headb, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i8> %splata, %splatb
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vadd_vx_nxv2i8:
 ; CHECK: # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
index ae93e8fbc50f..21fab827232d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -36,6 +36,36 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+; Test V/1 to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdiv_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdiv_vi_nxv1i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vdiv.vx v16, v16, a0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = sdiv <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+; Test 0/V to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdiv_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdiv_iv_nxv1i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vdiv.vv v16, v25, v16
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = sdiv <vscale x 1 x i8> %splat, %va
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vdiv_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vdiv_vv_nxv2i8:
 ; CHECK: # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll
index 0acc0eb4302e..7982e12278fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll
@@ -36,6 +36,36 @@ define <vscale x 1 x i8> @vdivu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+; Test V/1 to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdivu_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vdivu.vx v16, v16, a0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = udiv <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+; Test 0/V to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdivu_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdivu_iv_nxv1i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vdivu.vv v16, v25, v16
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = udiv <vscale x 1 x i8> %splat, %va
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vdivu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vdivu_vv_nxv2i8:
 ; CHECK: # %bb.0:
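For context on what the FIXMEs above are asking for: with fixed-length vector
types the generic folders already see through constant operands, so the
equivalent IR is evaluated at compile time and no vadd/vdiv instruction is
emitted. A minimal fixed-length analogue is sketched below; the function name
and the <4 x i8> type are chosen purely for illustration and are not part of
this commit.

define <4 x i8> @vadd_ii_v4i8() {
  ; Both operands are constant, so this add folds to <i8 5, i8 5, i8 5, i8 5>
  ; before instruction selection; the scalable splat form above currently
  ; survives all the way to an actual vadd.
  %vc = add <4 x i8> <i8 2, i8 2, i8 2, i8 2>, <i8 3, i8 3, i8 3, i8 3>
  ret <4 x i8> %vc
}

The division tests probe the same gap: for fixed-length vectors, V/1
simplifies to V and 0/V simplifies to zero, which is the folding the
scalable-vector tests above are still waiting on.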