[llvm-branch-commits] [llvm] ea88dfd - [RISCV] Add target specific loop unrolling and peeling preferences

2021-12-07 Thread Michael Berg via llvm-branch-commits

Author: Michael Berg
Date: 2021-12-07T14:31:40-08:00
New Revision: ea88dfda6f90b33f1e1bd7231bf249ac3b73a6f8

URL: 
https://github.com/llvm/llvm-project/commit/ea88dfda6f90b33f1e1bd7231bf249ac3b73a6f8
DIFF: 
https://github.com/llvm/llvm-project/commit/ea88dfda6f90b33f1e1bd7231bf249ac3b73a6f8.diff

LOG: [RISCV] Add target specific loop unrolling and peeling preferences

This change adds initial support for both of these preference helper
functions. The loop unrolling preferences are given initial settings
that control the thresholds, size, and attributes of the loops to
unroll, with some tuning already done. The peeling preferences may
need further tuning as well, since the initial support closely
mirrors what other architectures use.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D113798
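
For context, a minimal sketch (not part of this commit) of how these hooks
are reached: a transform queries them through the TargetTransformInfo
facade, which dispatches to the RISC-V override added below. The helper
name mayRuntimeUnroll and the conservative defaults are illustrative
assumptions; the real unroller computes its defaults in
gatherUnrollingPreferences.

  // Sketch only: query the target's unrolling preferences through TTI.
  // Assumes the LLVM 13-era signature shown in the diff below.
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/Analysis/TargetTransformInfo.h"

  using namespace llvm;

  static bool mayRuntimeUnroll(Loop *L, ScalarEvolution &SE,
                               const TargetTransformInfo &TTI,
                               OptimizationRemarkEmitter &ORE) {
    TargetTransformInfo::UnrollingPreferences UP;
    // Conservative starting point; the target hook may flip these on.
    UP.Partial = false;
    UP.Runtime = false;
    UP.Force = false;
    // For RISC-V this dispatches to RISCVTTIImpl::getUnrollingPreferences.
    TTI.getUnrollingPreferences(L, SE, UP, &ORE);
    return UP.Runtime || UP.Partial || UP.Force;
  }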

Added: 
llvm/test/Transforms/LoopUnroll/RISCV/unroll.ll

Modified: 
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h

Removed: 




diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 56f0952fafc9b..54458d1ce6d56 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -162,3 +162,82 @@ InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
       getMemoryOpCost(Opcode, VTy->getElementType(), Alignment, 0, CostKind, I);
   return NumLoads * MemOpCost;
 }
+
+void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                                           TTI::UnrollingPreferences &UP,
+                                           OptimizationRemarkEmitter *ORE) {
+  // TODO: More tuning on benchmarks and metrics with changes as needed
+  //   would apply to all settings below to enable performance.
+
+  // Enable Upper bound unrolling universally, not dependent upon the
+  // conditions below.
+  UP.UpperBound = true;
+
+  // Disable loop unrolling for Oz and Os.
+  UP.OptSizeThreshold = 0;
+  UP.PartialOptSizeThreshold = 0;
+  if (L->getHeader()->getParent()->hasOptSize())
+    return;
+
+  SmallVector<BasicBlock *, 4> ExitingBlocks;
+  L->getExitingBlocks(ExitingBlocks);
+  LLVM_DEBUG(dbgs() << "Loop has:\n"
+                    << "Blocks: " << L->getNumBlocks() << "\n"
+                    << "Exit blocks: " << ExitingBlocks.size() << "\n");
+
+  // Only allow another exit other than the latch. This acts as an early exit
+  // as it mirrors the profitability calculation of the runtime unroller.
+  if (ExitingBlocks.size() > 2)
+    return;
+
+  // Limit the CFG of the loop body for targets with a branch predictor.
+  // Allowing 4 blocks permits if-then-else diamonds in the body.
+  if (L->getNumBlocks() > 4)
+    return;
+
+  // Don't unroll vectorized loops, including the remainder loop
+  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
+    return;
+
+  // Scan the loop: don't unroll loops with calls as this could prevent
+  // inlining.
+  InstructionCost Cost = 0;
+  for (auto *BB : L->getBlocks()) {
+    for (auto &I : *BB) {
+      // Initial setting - Don't unroll loops containing vectorized
+      // instructions.
+      if (I.getType()->isVectorTy())
+        return;
+
+      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
+        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
+          if (!isLoweredToCall(F))
+            continue;
+        }
+        return;
+      }
+
+      SmallVector<const Value *, 4> Operands(I.operand_values());
+      Cost +=
+          getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
+
+  UP.Partial = true;
+  UP.Runtime = true;
+  UP.UnrollRemainder = true;
+  UP.UnrollAndJam = true;
+  UP.UnrollAndJamInnerLoopThreshold = 60;
+
+  // Force unrolling small loops can be very useful because of the branch
+  // taken cost of the backedge.
+  if (Cost < 12)
+    UP.Force = true;
+}
+
+void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
+                                         TTI::PeelingPreferences &PP) {
+  BaseT::getPeelingPreferences(L, SE, PP);
+}

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 675681616d6e4..016db9d4c26dc 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -73,6 +73,13 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
     llvm_unreachable("Unsupported register kind");
   }
 
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                               TTI::UnrollingPreferences &UP,
+                               OptimizationRemarkEmitter *ORE);
+
+  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
+                             TTI::PeelingPreferences &PP);
+
   unsigned getMinVectorRegisterBitWidth() const {
 r

[llvm-branch-commits] [llvm] 5633f32 - Propagate MIFlags in table gen

2020-05-13 Thread Michael Berg via llvm-branch-commits

Author: Michael Berg
Date: 2020-05-13T18:25:33-07:00
New Revision: 5633f32102c454edc6f9d333c16ae920a7bac888

URL: 
https://github.com/llvm/llvm-project/commit/5633f32102c454edc6f9d333c16ae920a7bac888
DIFF: 
https://github.com/llvm/llvm-project/commit/5633f32102c454edc6f9d333c16ae920a7bac888.diff

LOG: Propagate MIFlags in table gen

Summary: Add flag propagation to TableGen-erated instruction selection by
copying the MIFlags of the originating MI onto OutMIs in
InstructionSelector::executeMatchTable.

Reviewers: dsanders, volkan

Reviewed By: dsanders

Subscribers: llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D74988
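
For background, a minimal sketch (an assumption, not part of this patch) of
what propagating MIFlags amounts to: read the flags off the originating
MachineInstr and stamp them onto each instruction built during selection.
The helper name copyMIFlags is hypothetical; the patch performs the
equivalent over OutMIs in a propagateFlags lambda, as the diff below shows.

  // Sketch only: copy the MIFlags of a source MachineInstr onto an
  // instruction being built, mirroring what propagateFlags does for
  // every OutMIs entry when GIR_Done is reached.
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"

  using namespace llvm;

  static void copyMIFlags(const MachineInstr &Src, MachineInstrBuilder &MIB) {
    uint16_t Flags = Src.getFlags();          // e.g. NoSWrap from a G_ADD
    if (Flags == MachineInstr::MIFlag::NoFlags)
      return;                                 // nothing to propagate
    MIB.setMIFlags(Flags);                    // stamp the selected MI
  }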

Added: 


Modified: 
llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir

Removed: 




diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index 9e9e1806bc6f..cb48122047f1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -57,6 +57,7 @@ bool InstructionSelector::executeMatchTable(
 
   uint64_t CurrentIdx = 0;
   SmallVector<uint64_t, 8> OnFailResumeAt;
+  uint16_t Flags = State.MIs[0]->getFlags();
 
   enum RejectAction { RejectAndGiveUp, RejectAndResume };
   auto handleReject = [&]() -> RejectAction {
@@ -71,6 +72,15 @@ bool InstructionSelector::executeMatchTable(
     return RejectAndResume;
   };
 
+  auto propagateFlags = [&](NewMIVector &OutMIs) {
+    if (Flags == MachineInstr::MIFlag::NoFlags)
+      return false;
+    for (auto MIB : OutMIs)
+      MIB.setMIFlags(Flags);
+
+    return true;
+  };
+
   while (true) {
     assert(CurrentIdx != ~0u && "Invalid MatchTable index");
     int64_t MatcherOpcode = MatchTable[CurrentIdx++];
@@ -1065,6 +1075,7 @@ bool InstructionSelector::executeMatchTable(
     case GIR_Done:
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                       dbgs() << CurrentIdx << ": GIR_Done\n");
+      propagateFlags(OutMIs);
       return true;
 
     default:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
index 8e1ee344264c..7139c7b8d94d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
@@ -71,12 +71,12 @@ body: |
   ; CHECK:   BR %18
   ; CHECK: bb.2.sw.bb:
   ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 42, 0
+  ; CHECK:   [[ADDWri:%[0-9]+]]:gpr32sp = nsw ADDWri [[COPY]], 42, 0
   ; CHECK:   B %bb.4
   ; CHECK: bb.3.sw.bb1:
   ; CHECK:   successors: %bb.4(0x80000000)
   ; CHECK:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 3
-  ; CHECK:   [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[MOVi32imm]], $wzr
+  ; CHECK:   [[MADDWrrr:%[0-9]+]]:gpr32 = nsw MADDWrrr [[COPY]], [[MOVi32imm]], $wzr
   ; CHECK: bb.4.return:
   ; CHECK:   [[PHI:%[0-9]+]]:gpr32 = PHI [[MADDWrrr]], %bb.3, [[ADDWri]], %bb.2, [[COPY1]], %bb.0, [[COPY2]], %bb.1
   ; CHECK:   $w0 = COPY [[PHI]]



___
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits