[llvm-branch-commits] [llvm] f3f9ce3 - [RISCV] Define vmclr.m/vmset.m intrinsics.

2020-12-28 Thread Zakk Chen via llvm-branch-commits

Author: Zakk Chen
Date: 2020-12-28T18:57:17-08:00
New Revision: f3f9ce3b7948b250bc532818ed76a64cea8b6fbe

URL: 
https://github.com/llvm/llvm-project/commit/f3f9ce3b7948b250bc532818ed76a64cea8b6fbe
DIFF: 
https://github.com/llvm/llvm-project/commit/f3f9ce3b7948b250bc532818ed76a64cea8b6fbe.diff

LOG: [RISCV] Define vmclr.m/vmset.m intrinsics.

Define vmclr.m/vmset.m intrinsics and lower to vmxor.mm/vmxnor.mm.

Ideally all rvv pseudo instructions could be implemented in a C header,
but those two instructions don't take an input, so codegen cannot guarantee
that the source register becomes the same as the destination.

We expand the pseudo-v-inst into the corresponding v-inst in the
RISCVExpandPseudoInsts pass.

Reviewed By: craig.topper, frasercrmck

Differential Revision: https://reviews.llvm.org/D93849

Added: 
llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll

Modified: 
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 




diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td 
b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index be11b518416c..d72dc5a4dd59 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -404,6 +404,12 @@ let TargetPrefix = "riscv" in {
 [LLVMMatchType<0>, LLVMMatchType<0>,
  LLVMMatchType<0>, llvm_anyint_ty],
 [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (vector)
+  // Input: (vl)
+  class RISCVNullaryIntrinsic
+: Intrinsic<[llvm_anyvector_ty],
+[llvm_anyint_ty],
+[IntrNoMem]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
 def "int_riscv_" # NAME : RISCVUSLoad;
@@ -701,6 +707,8 @@ let TargetPrefix = "riscv" in {
   def int_riscv_vmnor: RISCVBinaryAAANoMask;
   def int_riscv_vmornot: RISCVBinaryAAANoMask;
   def int_riscv_vmxnor: RISCVBinaryAAANoMask;
+  def int_riscv_vmclr : RISCVNullaryIntrinsic;
+  def int_riscv_vmset : RISCVNullaryIntrinsic;
 
   defm vpopc : RISCVMaskUnarySOut;
   defm vfirst : RISCVMaskUnarySOut;
@@ -724,9 +732,8 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic;
   // Output: (vector)
   // Input: (vl)
-  def int_riscv_vid : Intrinsic<[llvm_anyvector_ty],
-[llvm_anyint_ty],
-[IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vid : RISCVNullaryIntrinsic;
+
   // Output: (vector)
   // Input: (maskedoff, mask, vl)
   def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],

diff  --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp 
b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 660ae915f7b8..5f50892ca886 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -60,6 +60,8 @@ class RISCVExpandPseudo : public MachineFunctionPass {
   MachineBasicBlock::iterator MBBI,
   MachineBasicBlock::iterator &NextMBBI);
   bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+  bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, unsigned Opcode);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -102,6 +104,24 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
 return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
   case RISCV::PseudoVSETVLI:
 return expandVSetVL(MBB, MBBI);
+  case RISCV::PseudoVMCLR_M_B1:
+  case RISCV::PseudoVMCLR_M_B2:
+  case RISCV::PseudoVMCLR_M_B4:
+  case RISCV::PseudoVMCLR_M_B8:
+  case RISCV::PseudoVMCLR_M_B16:
+  case RISCV::PseudoVMCLR_M_B32:
+  case RISCV::PseudoVMCLR_M_B64:
+// vmclr.m vd => vmxor.mm vd, vd, vd
+return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXOR_MM);
+  case RISCV::PseudoVMSET_M_B1:
+  case RISCV::PseudoVMSET_M_B2:
+  case RISCV::PseudoVMSET_M_B4:
+  case RISCV::PseudoVMSET_M_B8:
+  case RISCV::PseudoVMSET_M_B16:
+  case RISCV::PseudoVMSET_M_B32:
+  case RISCV::PseudoVMSET_M_B64:
+// vmset.m vd => vmxnor.mm vd, vd, vd
+return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
   }
 
   return false;
@@ -213,6 +233,19 @@ bool RISCVExpandPseudo::expandVSetVL(MachineBasicBlock 
&MBB,
   return true;
 }
 
+bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
+  MachineBasicBlock::iterator MBBI,
+  unsigned Opcode) {
+  DebugLoc DL = MBBI->getDebugLoc();
+  Register DstReg = MBBI->getOperand(0).getReg();
+  const MCInstrDesc &Desc = TII->get(Opcode);
+  BuildMI(MBB, MBBI, DL, Desc, DstReg)
+  .addReg(DstReg, RegState::Undef)
+  .addR

[llvm-branch-commits] [llvm] 15ce0ab - [RISCV] Refine vector load/store tablegen pattern, NFC.

2020-12-15 Thread Zakk Chen via llvm-branch-commits

Author: Zakk Chen
Date: 2020-12-15T18:55:55-08:00
New Revision: 15ce0ab7ac46382ec38e7de59ec40c099b85cbf7

URL: 
https://github.com/llvm/llvm-project/commit/15ce0ab7ac46382ec38e7de59ec40c099b85cbf7
DIFF: 
https://github.com/llvm/llvm-project/commit/15ce0ab7ac46382ec38e7de59ec40c099b85cbf7.diff

LOG: [RISCV] Refine vector load/store tablegen pattern, NFC.

Refine tablegen pattern for vector load/store, and follow
D93012 to separate masked and unmasked definitions for
pseudo load/store instructions.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93284

Added: 


Modified: 
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll

Removed: 




diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td 
b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 81c47abab595..25fd7435affd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -277,6 +277,68 @@ class VPseudo :
   let VLMul = m.value;
 }
 
+class VPseudoUSLoadNoMask:
+  Pseudo<(outs RetClass:$rd),
+ (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+  RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast(PseudoToVInst.VInst);
+}
+
+class VPseudoUSLoadMask:
+  Pseudo<(outs GetVRegNoV0.R:$rd),
+  (ins GetVRegNoV0.R:$merge,
+   GPR:$rs1,
+   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+  RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast(PseudoToVInst.VInst);
+}
+
+class VPseudoUSStoreNoMask:
+  Pseudo<(outs),
+  (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+  RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast(PseudoToVInst.VInst);
+}
+
+class VPseudoUSStoreMask:
+  Pseudo<(outs),
+  (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, 
ixlenimm:$sew),[]>,
+  RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let BaseInstr = !cast(PseudoToVInst.VInst);
+}
+
 class VPseudoBinaryNoMask;
+  def "_V_" # LInfo # "_MASK" : VPseudoUSLoadMask;
+}
+  }
+}
+
+multiclass VPseudoUSStore {
+  foreach lmul = MxList.m in {
+defvar LInfo = lmul.MX;
+defvar vreg = lmul.vrclass;
+let VLMul = lmul.value in {
+  def "_V_" # LInfo : VPseudoUSStoreNoMask;
+  def "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask;
+}
+  }
+}
+
 multiclass VPseudoBinary
+{
+  defvar load_instr = !cast("PseudoVLE"#sew#"_V_"#vlmul.MX);
+  defvar store_instr = !cast("PseudoVSE"#sew#"_V_"#vlmul.MX);
+  // Load
+  def : Pat<(type (load reg_rs1:$rs1)),
+(load_instr reg_rs1:$rs1, VLMax, sew)>;
+  // Store
+  def : Pat<(store type:$rs2, reg_rs1:$rs1),
+(store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+}
+
+multiclass VPatUSLoadStoreSDNodes {
+  foreach vti = AllVectors in
+defm "" : VPatUSLoadStoreSDNode;
+}
+
 class VPatBinarySDNode
vti.LMul, vti.RegClass, vti.RegClass>;
 }
 
+//===--===//
+// Helpers to define the intrinsic patterns.
+//===--===//
 class VPatBinaryNoMask
 // 7. Vector Loads and Stores
 
//===--===//
 
-// Pseudos.
+// Pseudos Unit-Stride Loads and Stores
 foreach eew = EEWList in {
-  foreach lmul = MxList.m in {
-defvar LInfo = lmul.MX;
-defvar vreg = lmul.vrclass;
-defvar vlmul = lmul.value;
-defvar constraint = "$rd = $merge";
-
-let mayLoad = 1, mayStore = 0, hasSideEffects = 0,
-usesCustomInserter = 1,
-VLMul = vlmul in
-{
-  let Uses = [VL, VTYPE], VLIndex = 4, SEWIndex = 5, MergeOpIndex = 1,
-  Constraints = constraint,
-  BaseInstr = !cast("VLE" # eew # "_V") in
-  def "PseudoVLE" # eew # "_V_" # LInfo
-: Pseudo<(outs vreg:$rd),
- (ins vreg:$merge, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-  ixlenimm:$sew),
- []>,
-  RISCVVPseudo;
-}
-
-let mayLoad

[llvm-branch-commits] [llvm] 7a2c8be - [RISCV] Define vleff intrinsics.

2020-12-21 Thread Zakk Chen via llvm-branch-commits

Author: Zakk Chen
Date: 2020-12-21T22:05:38-08:00
New Revision: 7a2c8be641ded68b3424b46dbf47f2879a9eaa2e

URL: 
https://github.com/llvm/llvm-project/commit/7a2c8be641ded68b3424b46dbf47f2879a9eaa2e
DIFF: 
https://github.com/llvm/llvm-project/commit/7a2c8be641ded68b3424b46dbf47f2879a9eaa2e.diff

LOG: [RISCV] Define vleff intrinsics.

Define vleff intrinsics and lower to V instructions.

We worked with @rogfer01 from BSC to come up with this patch.

Authored-by: Roger Ferrer Ibanez 
Co-Authored-by: Zakk Chen 

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93516

Added: 
llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll

Modified: 
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 




diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td 
b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index dc1d56322191..d3ccd2eaf186 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -378,6 +378,7 @@ let TargetPrefix = "riscv" in {
   }
 
   defm vle : RISCVUSLoad;
+  defm vleff : RISCVUSLoad;
   defm vse : RISCVUSStore;
   defm vlse: RISCVSLoad;
   defm vsse: RISCVSStore;

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td 
b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index a5c5c04542e1..68c656a049ae 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1599,6 +1599,16 @@ foreach eew = EEWList in {
   defm PseudoVSUXEI # eew : VPseudoIStore;
 }
 
+//===--===//
+// 7.7. Unit-stride Fault-Only-First Loads
+//===--===//
+
+// vleff may update VL register
+let hasSideEffects = 1, Defs = [VL] in
+foreach eew = EEWList in {
+  defm PseudoVLE # eew # FF : VPseudoUSLoad;
+}
+
 
//===--===//
 // Pseudo Instructions
 
//===--===//
@@ -1866,6 +1876,9 @@ foreach vti = AllVectors in
   defm : VPatUSLoad<"int_riscv_vle",
 "PseudoVLE" # vti.SEW,
 vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+  defm : VPatUSLoad<"int_riscv_vleff",
+"PseudoVLE" # vti.SEW # "FF",
+vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
   defm : VPatUSStore<"int_riscv_vse",
  "PseudoVSE" # vti.SEW,
  vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
new file mode 100644
index ..ea882a5bf587
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
@@ -0,0 +1,1045 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d 
-verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare  @llvm.riscv.vleff.nxv1i32(
+  *,
+  i32);
+
+define  @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32
+; CHECK:   vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:   vle32ff.v {{v[0-9]+}}, (a0)
+  %a = call  @llvm.riscv.vleff.nxv1i32(
+* %0,
+i32 %1)
+
+  ret  %a
+}
+
+declare  @llvm.riscv.vleff.mask.nxv1i32(
+  ,
+  *,
+  ,
+  i32);
+
+define  @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1,  %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32
+; CHECK:   vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:   vle32ff.v {{v[0-9]+}}, (a0), v0.t
+  %a = call  @llvm.riscv.vleff.mask.nxv1i32(
+ %0,
+* %1,
+ %2,
+i32 %3)
+
+  ret  %a
+}
+
+declare  @llvm.riscv.vleff.nxv2i32(
+  *,
+  i32);
+
+define  @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32
+; CHECK:   vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:   vle32ff.v {{v[0-9]+}}, (a0)
+  %a = call  @llvm.riscv.vleff.nxv2i32(
+* %0,
+i32 %1)
+
+  ret  %a
+}
+
+declare  @llvm.riscv.vleff.mask.nxv2i32(
+  ,
+  *,
+  ,
+  i32);
+
+define  @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1,  %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32
+; CHECK:   vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:   vle32ff.v {{v[0-9]+}}, (a0), v0.t
+  %a = call  @llvm.riscv.vleff.mask.nxv2i32(
+ %0,
+* %1,
+ %2,
+i32 %3)
+
+  ret  %a
+}
+
+declare  @llvm.riscv.vleff.nxv4i32(
+  *,
+  i32);
+
+define  @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32
+; CHECK:   vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,

[llvm-branch-commits] [llvm] 351c216 - [RISCV] Define vector mask-register logical intrinsics.

2020-12-24 Thread Zakk Chen via llvm-branch-commits

Author: Zakk Chen
Date: 2020-12-24T18:59:05-08:00
New Revision: 351c216f36afab3bb88eb74995a39940b85e3812

URL: 
https://github.com/llvm/llvm-project/commit/351c216f36afab3bb88eb74995a39940b85e3812
DIFF: 
https://github.com/llvm/llvm-project/commit/351c216f36afab3bb88eb74995a39940b85e3812.diff

LOG: [RISCV] Define vector mask-register logical intrinsics.

Define vector mask-register logical intrinsics and lower them
to V instructions. Also define pseudo instructions vmmv.m
and vmnot.m.

We worked with @rogfer01 from BSC to come up with this patch.

Authored-by: Roger Ferrer Ibanez 
Co-Authored-by: Zakk Chen 

Differential Revision: https://reviews.llvm.org/D93705

Added: 
llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll

Modified: 
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 




diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td 
b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index cb335e739266..6778b20ac0a8 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -189,6 +189,12 @@ let TargetPrefix = "riscv" in {
  LLVMPointerType>, llvm_anyvector_ty,
  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 
llvm_anyint_ty],
 [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as first and second source vector.
+  // Input: (vector_in, vector_in, vl)
+  class RISCVBinaryAAANoMask
+: Intrinsic<[llvm_anyvector_ty],
+[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
+[IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
@@ -643,4 +649,13 @@ let TargetPrefix = "riscv" in {
   defm vfredsum : RISCVReduction;
   defm vfredmin : RISCVReduction;
   defm vfredmax : RISCVReduction;
+
+  def int_riscv_vmand: RISCVBinaryAAANoMask;
+  def int_riscv_vmnand: RISCVBinaryAAANoMask;
+  def int_riscv_vmandnot: RISCVBinaryAAANoMask;
+  def int_riscv_vmxor: RISCVBinaryAAANoMask;
+  def int_riscv_vmor: RISCVBinaryAAANoMask;
+  def int_riscv_vmnor: RISCVBinaryAAANoMask;
+  def int_riscv_vmornot: RISCVBinaryAAANoMask;
+  def int_riscv_vmxnor: RISCVBinaryAAANoMask;
 } // TargetPrefix = "riscv"

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td 
b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 713a289badc2..c23c650973b3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -188,6 +188,25 @@ class GetIntVTypeInfo
   VTypeInfo Vti = !cast(!subst("VF", "VI", !cast(vti)));
 }
 
+class MTypeInfo {
+  ValueType Mask = Mas;
+  // {SEW, VLMul} values set a valid VType to deal with this mask type.
+  // we assume SEW=8 and set corresponding LMUL.
+  int SEW = 8;
+  LMULInfo LMul = M;
+}
+
+defset list AllMasks = {
+  // vbool_t,  = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+}
+
 class VTypeInfoToWide
 {
   VTypeInfo Vti = vti;
@@ -697,6 +716,13 @@ multiclass VPseudoBinaryV_VI {
 defm _VI : VPseudoBinary;
 }
 
+multiclass VPseudoBinaryM_MM {
+  foreach m = MxList.m in
+let VLMul = m.value in {
+  def "_MM_" # m.MX : VPseudoBinaryNoMask;
+}
+}
+
 // We use earlyclobber here due to
 // * The destination EEW is smaller than the source EEW and the overlap is
 //   in the lowest-numbered part of the source register group is legal.
@@ -1297,6 +1323,13 @@ multiclass VPatBinaryV_VI;
 }
 
+multiclass VPatBinaryM_MM {
+  foreach mti = AllMasks in
+def : VPatBinaryNoMask;
+}
+
 multiclass VPatBinaryW_VV vtilist> {
   foreach VtiToWti = vtilist in {
@@ -2053,6 +2086,27 @@ defm PseudoVFREDMIN: VPseudoReductionV_VS;
 defm PseudoVFREDMAX: VPseudoReductionV_VS;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===--===//
+// 16. Vector Mask Instructions
+//===--

[llvm-branch-commits] [llvm] da4a637 - [RISCV] Define vpopc/vfirst intrinsics.

2020-12-24 Thread Zakk Chen via llvm-branch-commits

Author: Zakk Chen
Date: 2020-12-24T19:44:34-08:00
New Revision: da4a637e99170b16e1f15e5cfa5e0b020bd6736d

URL: 
https://github.com/llvm/llvm-project/commit/da4a637e99170b16e1f15e5cfa5e0b020bd6736d
DIFF: 
https://github.com/llvm/llvm-project/commit/da4a637e99170b16e1f15e5cfa5e0b020bd6736d.diff

LOG: [RISCV] Define vpopc/vfirst intrinsics.

Define vpopc/vfirst intrinsics and lower to V instructions.

We worked with @rogfer01 from BSC to come up with this patch.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93795

Added: 
llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll

Modified: 
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 




diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td 
b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 6778b20ac0a8..5e222e7474d2 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -375,6 +375,20 @@ let TargetPrefix = "riscv" in {
 [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
  LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, 
llvm_anyint_ty],
 [IntrNoMem]>, RISCVVIntrinsic;
+  // For unary operations with scalar type output without mask
+  // Output: (scalar type)
+  // Input: (vector_in, vl)
+  class RISCVMaskUnarySOutNoMask
+: Intrinsic<[llvm_anyint_ty],
+[llvm_anyvector_ty, LLVMMatchType<0>],
+[IntrNoMem]>, RISCVVIntrinsic;
+  // For unary operations with scalar type output with mask
+  // Output: (scalar type)
+  // Input: (vector_in, mask, vl)
+  class RISCVMaskUnarySOutMask
+: Intrinsic<[llvm_anyint_ty],
+[llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
+[IntrNoMem]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
 def "int_riscv_" # NAME : RISCVUSLoad;
@@ -451,6 +465,10 @@ let TargetPrefix = "riscv" in {
 def "int_riscv_" # NAME : RISCVReductionNoMask;
 def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
   }
+  multiclass RISCVMaskUnarySOut {
+def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
+def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -658,4 +676,8 @@ let TargetPrefix = "riscv" in {
   def int_riscv_vmnor: RISCVBinaryAAANoMask;
   def int_riscv_vmornot: RISCVBinaryAAANoMask;
   def int_riscv_vmxnor: RISCVBinaryAAANoMask;
+
+  defm vpopc : RISCVMaskUnarySOut;
+  defm vfirst : RISCVMaskUnarySOut;
+
 } // TargetPrefix = "riscv"

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td 
b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index c23c650973b3..fd4fb7c3e219 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -188,23 +188,24 @@ class GetIntVTypeInfo
   VTypeInfo Vti = !cast(!subst("VF", "VI", !cast(vti)));
 }
 
-class MTypeInfo {
+class MTypeInfo {
   ValueType Mask = Mas;
   // {SEW, VLMul} values set a valid VType to deal with this mask type.
   // we assume SEW=8 and set corresponding LMUL.
   int SEW = 8;
   LMULInfo LMul = M;
+  string BX = Bx; // Appendix of mask operations.
 }
 
 defset list AllMasks = {
   // vbool_t,  = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
+  def : MTypeInfo;
 }
 
 class VTypeInfoToWide
@@ -294,8 +295,15 @@ class PseudoToVInst {
  !subst("_MF2", "",
  !subst("_MF4", "",
  !subst("_MF8", "",
+ !subst("_B1", "",
+ !subst("_B2", "",
+ !subst("_B4", "",
+ !subst("_B8", "",
+ !subst("_B16", "",
+ !subst("_B32", "",
+ !subst("_B64", "",
  !subst("_MASK", "",
- !subst("Pseudo", "", PseudoInst);
+ !subst("Pseudo", "", PseudoInst;
 }
 
 // The destination vector register group for a masked vector instruction cannot
@@ -499,6 +507,36 @@ class VPseudoUnaryNoDummyMask(PseudoToVInst.VInst);
 }
 
+class VMaskPseudoUnarySOutNoMask:
+Pseudo<(outs GPR:$rd),
+   (ins VR:$rs1, GPR:$vl, ixlenimm:$sew), []>,
+RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast(PseudoT