[llvm-commits] CVS: llvm/include/llvm/CodeGen/Passes.h LinkAllCodegenComponents.h
Changes in directory llvm/include/llvm/CodeGen: Passes.h updated: 1.27 -> 1.28 LinkAllCodegenComponents.h updated: 1.5 -> 1.6 --- Log message: check in the BigBlock local register allocator --- Diffs of the changes: (+10 -0) LinkAllCodegenComponents.h |1 + Passes.h |9 + 2 files changed, 10 insertions(+) Index: llvm/include/llvm/CodeGen/Passes.h diff -u llvm/include/llvm/CodeGen/Passes.h:1.27 llvm/include/llvm/CodeGen/Passes.h:1.28 --- llvm/include/llvm/CodeGen/Passes.h:1.27 Fri Jun 8 12:18:56 2007 +++ llvm/include/llvm/CodeGen/Passes.h Fri Jun 22 03:27:12 2007 @@ -70,6 +70,15 @@ /// FunctionPass *createLocalRegisterAllocator(); + /// BigBlockRegisterAllocation Pass - The BigBlock register allocator + /// munches single basic blocks at a time, like the local register + /// allocator. While the BigBlock allocator is a little slower, and uses + /// somewhat more memory than the local register allocator, it tends to + /// yield the best allocations (of any of the allocators) for blocks that + /// have hundreds or thousands of instructions in sequence. + /// + FunctionPass *createBigBlockRegisterAllocator(); + /// LinearScanRegisterAllocation Pass - This pass implements the linear scan /// register allocation algorithm, a global register allocator. /// Index: llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h diff -u llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h:1.5 llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h:1.6 --- llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h:1.5 Sun Dec 17 05:13:13 2006 +++ llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h Fri Jun 22 03:27:12 2007 @@ -30,6 +30,7 @@ (void) llvm::createSimpleRegisterAllocator(); (void) llvm::createLocalRegisterAllocator(); + (void) llvm::createBigBlockRegisterAllocator(); (void) llvm::createLinearScanRegisterAllocator(); (void) llvm::createBFS_DAGScheduler(NULL, NULL, NULL); ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/CodeGen/RegAllocBigBlock.cpp
Changes in directory llvm/lib/CodeGen: RegAllocBigBlock.cpp added (r1.1) --- Log message: check in the BigBlock local register allocator --- Diffs of the changes: (+852 -0) RegAllocBigBlock.cpp | 852 +++ 1 files changed, 852 insertions(+) Index: llvm/lib/CodeGen/RegAllocBigBlock.cpp diff -c /dev/null llvm/lib/CodeGen/RegAllocBigBlock.cpp:1.1 *** /dev/null Fri Jun 22 03:27:22 2007 --- llvm/lib/CodeGen/RegAllocBigBlock.cpp Fri Jun 22 03:27:12 2007 *** *** 0 --- 1,852 + //===- RegAllocBigBlock.cpp - A register allocator for large basic blocks -===// + // + // The LLVM Compiler Infrastructure + // + // This file was developed by the LLVM research group and is distributed under + // the University of Illinois Open Source License. See LICENSE.TXT for details. + // + //===--===// + // + // This register allocator is derived from RegAllocLocal.cpp. Like it, this + // allocator works on one basic block at a time, oblivious to others. + // However, the algorithm used here is suited for long blocks of + // instructions - registers are spilled by greedily choosing those holding + // values that will not be needed for the longest amount of time. This works + // particularly well for blocks with 10 or more times as many instructions + // as machine registers, but can be used for general code. + // + //===--===// + // + // TODO: - automagically invoke linearscan for (groups of) small BBs? + // - break ties when picking regs? 
(probably not worth it in a + // JIT context) + // + //===--===// + + #define DEBUG_TYPE "regalloc" + #include "llvm/BasicBlock.h" + #include "llvm/CodeGen/Passes.h" + #include "llvm/CodeGen/MachineFunctionPass.h" + #include "llvm/CodeGen/MachineInstr.h" + #include "llvm/CodeGen/SSARegMap.h" + #include "llvm/CodeGen/MachineFrameInfo.h" + #include "llvm/CodeGen/LiveVariables.h" + #include "llvm/CodeGen/RegAllocRegistry.h" + #include "llvm/Target/TargetInstrInfo.h" + #include "llvm/Target/TargetMachine.h" + #include "llvm/Support/CommandLine.h" + #include "llvm/Support/Debug.h" + #include "llvm/Support/Compiler.h" + #include "llvm/ADT/IndexedMap.h" + #include "llvm/ADT/DenseMap.h" + #include "llvm/ADT/SmallVector.h" + #include "llvm/ADT/Statistic.h" + #include + using namespace llvm; + + STATISTIC(NumStores, "Number of stores added"); + STATISTIC(NumLoads , "Number of loads added"); + STATISTIC(NumFolded, "Number of loads/stores folded into instructions"); + + namespace { + static RegisterRegAlloc + bigBlockRegAlloc("bigblock", " Big-block register allocator", + createBigBlockRegisterAllocator); + + struct VRegKeyInfo { + static inline unsigned getEmptyKey() { return -1U; } + static inline unsigned getTombstoneKey() { return -2U; } + static unsigned getHashValue(const unsigned &Key) { return Key; } + }; + + class VISIBILITY_HIDDEN RABigBlock : public MachineFunctionPass { + public: + static char ID; + RABigBlock() : MachineFunctionPass((intptr_t)&ID) {} + private: + const TargetMachine *TM; + MachineFunction *MF; + const MRegisterInfo *RegInfo; + LiveVariables *LV; + + // InsnTimes - maps machine instructions to their "execute times" + std::map InsnTimes; + + // VRegReadTable - maps VRegs in a BB to the set of times they are read + DenseMap*, VRegKeyInfo> VRegReadTable; + + // StackSlotForVirtReg - Maps virtual regs to the frame index where these + // values are spilled. 
+ std::map StackSlotForVirtReg; + + // Virt2PhysRegMap - This map contains entries for each virtual register + // that is currently available in a physical register. + IndexedMap Virt2PhysRegMap; + + unsigned &getVirt2PhysRegMapSlot(unsigned VirtReg) { + return Virt2PhysRegMap[VirtReg]; + } + + // PhysRegsUsed - This array is effectively a map, containing entries for + // each physical register that currently has a value (ie, it is in + // Virt2PhysRegMap). The value mapped to is the virtual register + // corresponding to the physical register (the inverse of the + // Virt2PhysRegMap), or 0. The value is set to 0 if this register is pinned + // because it is used by a future instruction, and to -2 if it is not + // allocatable. If the entry for a physical register is -1, then the + // physical register is "not in the map". + // + std::vector PhysRegsUsed; + + // PhysRegsUseOrder - This contains a list of the physical registers that + // currently have a virtual register value in them. This list provides an + // ordering of registers, imposing a reallocation order. This list is only + // used if all registe
Re: [llvm-commits] CVS: llvm/lib/CodeGen/RegAllocBigBlock.cpp
Hi Duraid .. This looks nifty. Thanks for adding it. Some minor comments ... Reid. Duraid Madina wrote: >Changes in directory llvm/lib/CodeGen: > >RegAllocBigBlock.cpp added (r1.1) >--- >Log message: > >check in the BigBlock local register allocator > > > >--- >Diffs of the changes: (+852 -0) > > RegAllocBigBlock.cpp | 852 > +++ > 1 files changed, 852 insertions(+) > > >Index: llvm/lib/CodeGen/RegAllocBigBlock.cpp >diff -c /dev/null llvm/lib/CodeGen/RegAllocBigBlock.cpp:1.1 >*** /dev/null Fri Jun 22 03:27:22 2007 >--- llvm/lib/CodeGen/RegAllocBigBlock.cpp Fri Jun 22 03:27:12 2007 >*** >*** 0 >--- 1,852 >+ //===- RegAllocBigBlock.cpp - A register allocator for large basic blocks >-===// >+ // >+ // The LLVM Compiler Infrastructure >+ // >+ // This file was developed by the LLVM research group and is distributed >under > > Really? Wasn't the author Duraid? >+ // the University of Illinois Open Source License. See LICENSE.TXT for >details. >+ // >+ >//===--===// >+ // >+ // This register allocator is derived from RegAllocLocal.cpp. Like it, this >+ // allocator works on one basic block at a time, oblivious to others. >+ // However, the algorithm used here is suited for long blocks of >+ // instructions - registers are spilled by greedily choosing those holding >+ // values that will not be needed for the longest amount of time. This works >+ // particularly well for blocks with 10 or more times as many instructions >+ // as machine registers, but can be used for general code. >+ // >+ >//===--===// >+ // >+ // TODO: - automagically invoke linearscan for (groups of) small BBs? >+ // - break ties when picking regs? (probably not worth it in a >+ // JIT context) >+ // >+ >//===--===// > > This entire comment block belongs above the RABigBlock class and should be a doxygen comment (3 /). At this file level the comment should just say "This file implements the RABigBlock class". 
>+ >+ #define DEBUG_TYPE "regalloc" > > >+ #include "llvm/BasicBlock.h" >+ #include "llvm/CodeGen/Passes.h" >+ #include "llvm/CodeGen/MachineFunctionPass.h" >+ #include "llvm/CodeGen/MachineInstr.h" >+ #include "llvm/CodeGen/SSARegMap.h" >+ #include "llvm/CodeGen/MachineFrameInfo.h" >+ #include "llvm/CodeGen/LiveVariables.h" >+ #include "llvm/CodeGen/RegAllocRegistry.h" >+ #include "llvm/Target/TargetInstrInfo.h" >+ #include "llvm/Target/TargetMachine.h" >+ #include "llvm/Support/CommandLine.h" >+ #include "llvm/Support/Debug.h" >+ #include "llvm/Support/Compiler.h" >+ #include "llvm/ADT/IndexedMap.h" >+ #include "llvm/ADT/DenseMap.h" >+ #include "llvm/ADT/SmallVector.h" >+ #include "llvm/ADT/Statistic.h" >+ #include >+ using namespace llvm; >+ >+ STATISTIC(NumStores, "Number of stores added"); >+ STATISTIC(NumLoads , "Number of loads added"); >+ STATISTIC(NumFolded, "Number of loads/stores folded into instructions"); > > >+ >+ namespace { >+ static RegisterRegAlloc >+ bigBlockRegAlloc("bigblock", " Big-block register allocator", >+ createBigBlockRegisterAllocator); >+ >+ struct VRegKeyInfo { >+ static inline unsigned getEmptyKey() { return -1U; } >+ static inline unsigned getTombstoneKey() { return -2U; } >+ static unsigned getHashValue(const unsigned &Key) { return Key; } >+ }; > > This struct needs a doxygen comment >+ >+ class VISIBILITY_HIDDEN RABigBlock : public MachineFunctionPass { > > This is where you need to move the big comment at the start of the file to. >+ public: >+ static char ID; > > Please add a doxygen comment for this variable. >+ RABigBlock() : MachineFunctionPass((intptr_t)&ID) {} >+ private: >+ const TargetMachine *TM; >+ MachineFunction *MF; >+ const MRegisterInfo *RegInfo; >+ LiveVariables *LV; > > Please use a ///< comment to describe each of these member variables like you do for the ones that follow. 
>+ >+ // InsnTimes - maps machine instructions to their "execute times" >+ std::map InsnTimes; >+ >+ // VRegReadTable - maps VRegs in a BB to the set of times they are read >+ DenseMap*, VRegKeyInfo> VRegReadTable; >+ >+ // StackSlotForVirtReg - Maps virtual regs to the frame index where these >+ // values are spilled. >+ std::map StackSlotForVirtReg; >+ >+ // Virt2PhysRegMap - This map contains entries for each virtual register >+ // that is currently available in a physical register. >+ IndexedMap Virt2PhysRegMap; > > Please make the above comments into doxygen comments by using /// >+ >+ unsigned &getVirt2PhysRegMapSlot(unsigned VirtReg) { >+ return Virt2PhysRegMap[VirtReg]; >+ } > > This is mis-placed. Please move this fun
[llvm-commits] CVS: llvm/lib/Target/ARM/ARMISelLowering.cpp ARMISelLowering.h
Changes in directory llvm/lib/Target/ARM: ARMISelLowering.cpp updated: 1.58 -> 1.59 ARMISelLowering.h updated: 1.17 -> 1.18 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+9 -8) ARMISelLowering.cpp | 16 ARMISelLowering.h |1 + 2 files changed, 9 insertions(+), 8 deletions(-) Index: llvm/lib/Target/ARM/ARMISelLowering.cpp diff -u llvm/lib/Target/ARM/ARMISelLowering.cpp:1.58 llvm/lib/Target/ARM/ARMISelLowering.cpp:1.59 --- llvm/lib/Target/ARM/ARMISelLowering.cpp:1.58Tue Jun 19 18:55:02 2007 +++ llvm/lib/Target/ARM/ARMISelLowering.cpp Fri Jun 22 09:59:07 2007 @@ -1254,9 +1254,8 @@ SDOperand RL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(1), DAG.getConstant(0, MVT::i32)); - const TargetLowering &TL = DAG.getTargetLoweringInfo(); - unsigned LHSSB = TL.ComputeNumSignBits(Op.getOperand(0)); - unsigned RHSSB = TL.ComputeNumSignBits(Op.getOperand(1)); + unsigned LHSSB = DAG.ComputeNumSignBits(Op.getOperand(0)); + unsigned RHSSB = DAG.ComputeNumSignBits(Op.getOperand(1)); SDOperand Lo, Hi; // Figure out how to lower this multiply. @@ -1265,8 +1264,8 @@ Lo = DAG.getNode(ISD::MUL, MVT::i32, LL, RL); Hi = DAG.getNode(ISD::MULHS, MVT::i32, LL, RL); } else if (LHSSB == 32 && RHSSB == 32 && - TL.MaskedValueIsZero(Op.getOperand(0), 0xULL) && - TL.MaskedValueIsZero(Op.getOperand(1), 0xULL)) { + DAG.MaskedValueIsZero(Op.getOperand(0), 0xULL) && + DAG.MaskedValueIsZero(Op.getOperand(1), 0xULL)) { // If the inputs are zero extended, use mulhu. 
Lo = DAG.getNode(ISD::MUL, MVT::i32, LL, RL); Hi = DAG.getNode(ISD::MULHU, MVT::i32, LL, RL); @@ -1757,6 +1756,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, + const SelectionDAG &DAG, unsigned Depth) const { KnownZero = 0; KnownOne = 0; @@ -1764,12 +1764,12 @@ default: break; case ARMISD::CMOV: { // Bits are known zero/one if known on the LHS and RHS. -ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); +DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); if (KnownZero == 0 && KnownOne == 0) return; uint64_t KnownZeroRHS, KnownOneRHS; -ComputeMaskedBits(Op.getOperand(1), Mask, - KnownZeroRHS, KnownOneRHS, Depth+1); +DAG.ComputeMaskedBits(Op.getOperand(1), Mask, + KnownZeroRHS, KnownOneRHS, Depth+1); KnownZero &= KnownZeroRHS; KnownOne &= KnownOneRHS; return; Index: llvm/lib/Target/ARM/ARMISelLowering.h diff -u llvm/lib/Target/ARM/ARMISelLowering.h:1.17 llvm/lib/Target/ARM/ARMISelLowering.h:1.18 --- llvm/lib/Target/ARM/ARMISelLowering.h:1.17 Tue Jun 19 16:05:09 2007 +++ llvm/lib/Target/ARM/ARMISelLowering.h Fri Jun 22 09:59:07 2007 @@ -108,6 +108,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, +const SelectionDAG &DAG, unsigned Depth) const; ConstraintType getConstraintType(const std::string &Constraint) const; std::pair ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelDAGToDAG.cpp X86ISelLowering.cpp X86ISelLowering.h
Changes in directory llvm/lib/Target/X86: X86ISelDAGToDAG.cpp updated: 1.149 -> 1.150 X86ISelLowering.cpp updated: 1.413 -> 1.414 X86ISelLowering.h updated: 1.100 -> 1.101 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+3 -1) X86ISelDAGToDAG.cpp |2 +- X86ISelLowering.cpp |1 + X86ISelLowering.h |1 + 3 files changed, 3 insertions(+), 1 deletion(-) Index: llvm/lib/Target/X86/X86ISelDAGToDAG.cpp diff -u llvm/lib/Target/X86/X86ISelDAGToDAG.cpp:1.149 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp:1.150 --- llvm/lib/Target/X86/X86ISelDAGToDAG.cpp:1.149 Wed Apr 11 17:29:46 2007 +++ llvm/lib/Target/X86/X86ISelDAGToDAG.cpp Fri Jun 22 09:59:07 2007 @@ -744,7 +744,7 @@ // On x86-64, the resultant disp must fit in 32-bits. isInt32(AM.Disp + CN->getSignExtended()) && // Check to see if the LHS & C is zero. 
-TLI.MaskedValueIsZero(N.getOperand(0), CN->getValue())) { +CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getValue())) { AM.Disp += CN->getValue(); return false; } Index: llvm/lib/Target/X86/X86ISelLowering.cpp diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.413 llvm/lib/Target/X86/X86ISelLowering.cpp:1.414 --- llvm/lib/Target/X86/X86ISelLowering.cpp:1.413 Mon Jun 18 19:13:10 2007 +++ llvm/lib/Target/X86/X86ISelLowering.cpp Fri Jun 22 09:59:07 2007 @@ -4506,6 +4506,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, + const SelectionDAG &DAG, unsigned Depth) const { unsigned Opc = Op.getOpcode(); assert((Opc >= ISD::BUILTIN_OP_END || Index: llvm/lib/Target/X86/X86ISelLowering.h diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.100 llvm/lib/Target/X86/X86ISelLowering.h:1.101 --- llvm/lib/Target/X86/X86ISelLowering.h:1.100 Tue Apr 24 16:16:55 2007 +++ llvm/lib/Target/X86/X86ISelLowering.h Fri Jun 22 09:59:07 2007 @@ -319,6 +319,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, +const SelectionDAG &DAG, unsigned Depth = 0) const; SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG); ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
Changes in directory llvm/lib/Target/Sparc: SparcISelDAGToDAG.cpp updated: 1.122 -> 1.123 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+6 -2) SparcISelDAGToDAG.cpp |8 ++-- 1 files changed, 6 insertions(+), 2 deletions(-) Index: llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp diff -u llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp:1.122 llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp:1.123 --- llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp:1.122 Wed Mar 7 10:25:09 2007 +++ llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp Fri Jun 22 09:59:07 2007 @@ -112,6 +112,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, +const SelectionDAG &DAG, unsigned Depth = 0) const; virtual std::vector @@ -261,6 +262,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, + const SelectionDAG &DAG, unsigned Depth) const { uint64_t KnownZero2, KnownOne2; KnownZero = KnownOne = 0; // Don't know anything. @@ -269,8 +271,10 @@ default: break; case SPISD::SELECT_ICC: case SPISD::SELECT_FCC: -ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1); -ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1); +DAG.ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, + Depth+1); +DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, + Depth+1); assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp PPCISelLowering.cpp PPCISelLowering.h
Changes in directory llvm/lib/Target/PowerPC: PPCISelDAGToDAG.cpp updated: 1.230 -> 1.231 PPCISelLowering.cpp updated: 1.274 -> 1.275 PPCISelLowering.h updated: 1.65 -> 1.66 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+8 -6) PPCISelDAGToDAG.cpp |4 ++-- PPCISelLowering.cpp |9 + PPCISelLowering.h |1 + 3 files changed, 8 insertions(+), 6 deletions(-) Index: llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp diff -u llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.230 llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.231 --- llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.230 Mon Apr 2 00:59:42 2007 +++ llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp Fri Jun 22 09:59:07 2007 @@ -424,8 +424,8 @@ SDOperand Op1 = N->getOperand(1); uint64_t LKZ, LKO, RKZ, RKO; - TLI.ComputeMaskedBits(Op0, 0xULL, LKZ, LKO); - TLI.ComputeMaskedBits(Op1, 0xULL, RKZ, RKO); + CurDAG->ComputeMaskedBits(Op0, 0xULL, LKZ, LKO); + CurDAG->ComputeMaskedBits(Op1, 0xULL, RKZ, RKO); unsigned TargetMask = LKZ; unsigned InsertMask = RKZ; Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp diff -u llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.274 llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.275 --- llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.274 Tue Jun 19 00:46:06 2007 +++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp Fri Jun 22 09:59:07 2007 @@ -688,10 +688,10 @@ // disjoint. 
uint64_t LHSKnownZero, LHSKnownOne; uint64_t RHSKnownZero, RHSKnownOne; -ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); +DAG.ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); if (LHSKnownZero) { - ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne); + DAG.ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne); // If all of the bits are known zero on the LHS or RHS, the add won't // carry. if ((LHSKnownZero | RHSKnownZero) == ~0U) { @@ -742,7 +742,7 @@ // (for better address arithmetic) if the LHS and RHS of the OR are // provably disjoint. uint64_t LHSKnownZero, LHSKnownOne; - ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); + DAG.ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); if ((LHSKnownZero|~(unsigned)imm) == ~0U) { // If all of the bits are known zero on the LHS or RHS, the add won't // carry. @@ -850,7 +850,7 @@ // (for better address arithmetic) if the LHS and RHS of the OR are // provably disjoint. uint64_t LHSKnownZero, LHSKnownOne; - ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); + DAG.ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); if ((LHSKnownZero|~(unsigned)imm) == ~0U) { // If all of the bits are known zero on the LHS or RHS, the add won't // carry. 
@@ -3235,6 +3235,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, + const SelectionDAG &DAG, unsigned Depth) const { KnownZero = 0; KnownOne = 0; Index: llvm/lib/Target/PowerPC/PPCISelLowering.h diff -u llvm/lib/Target/PowerPC/PPCISelLowering.h:1.65 llvm/lib/Target/PowerPC/PPCISelLowering.h:1.66 --- llvm/lib/Target/PowerPC/PPCISelLowering.h:1.65 Tue Apr 3 08:59:52 2007 +++ llvm/lib/Target/PowerPC/PPCISelLowering.h Fri Jun 22 09:59:07 2007 @@ -230,6 +230,7 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, +const SelectionDAG &DAG, unsigned Depth = 0) const; virtual MachineBasicBlock *InsertAtEndOfBasicBlock(MachineInstr *MI, ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/include/llvm/CodeGen/SelectionDAG.h
Changes in directory llvm/include/llvm/CodeGen: SelectionDAG.h updated: 1.150 -> 1.151 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+23 -0) SelectionDAG.h | 23 +++ 1 files changed, 23 insertions(+) Index: llvm/include/llvm/CodeGen/SelectionDAG.h diff -u llvm/include/llvm/CodeGen/SelectionDAG.h:1.150 llvm/include/llvm/CodeGen/SelectionDAG.h:1.151 --- llvm/include/llvm/CodeGen/SelectionDAG.h:1.150 Sun Apr 22 18:15:29 2007 +++ llvm/include/llvm/CodeGen/SelectionDAG.hFri Jun 22 09:59:07 2007 @@ -465,6 +465,29 @@ SDOperand FoldSetCC(MVT::ValueType VT, SDOperand N1, SDOperand N2, ISD::CondCode Cond); + /// MaskedValueIsZero - Return true if 'Op & Mask' is known to be zero. We + /// use this predicate to simplify operations downstream. Op and Mask are + /// known to be the same type. + bool MaskedValueIsZero(SDOperand Op, uint64_t Mask, unsigned Depth = 0) +const; + + /// ComputeMaskedBits - Determine which of the bits specified in Mask are + /// known to be either zero or one and return them in the KnownZero/KnownOne + /// bitsets. This code only analyzes bits in Mask, in order to short-circuit + /// processing. Targets can implement the computeMaskedBitsForTargetNode + /// method in the TargetLowering class to allow target nodes to be understood. + void ComputeMaskedBits(SDOperand Op, uint64_t Mask, uint64_t &KnownZero, + uint64_t &KnownOne, unsigned Depth = 0) const; + + /// ComputeNumSignBits - Return the number of times the sign bit of the + /// register is replicated into the other bits. We know that at least 1 bit + /// is always equal to the sign bit (itself), but other cases can give us + /// information. 
For example, immediately after an "SRA X, 2", we know that + /// the top 3 bits are all equal to each other, so we return 3. Targets can + /// implement the ComputeNumSignBitsForTarget method in the TargetLowering + /// class to allow target nodes to be understood. + unsigned ComputeNumSignBits(SDOperand Op, unsigned Depth = 0) const; + private: void RemoveNodeFromCSEMaps(SDNode *N); SDNode *AddNonLeafNodeToCSEMaps(SDNode *N); ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/include/llvm/Target/TargetLowering.h
Changes in directory llvm/include/llvm/Target: TargetLowering.h updated: 1.128 -> 1.129 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+1 -21) TargetLowering.h | 22 +- 1 files changed, 1 insertion(+), 21 deletions(-) Index: llvm/include/llvm/Target/TargetLowering.h diff -u llvm/include/llvm/Target/TargetLowering.h:1.128 llvm/include/llvm/Target/TargetLowering.h:1.129 --- llvm/include/llvm/Target/TargetLowering.h:1.128 Thu Jun 21 09:42:22 2007 +++ llvm/include/llvm/Target/TargetLowering.h Fri Jun 22 09:59:07 2007 @@ -494,20 +494,6 @@ bool ShrinkDemandedConstant(SDOperand Op, uint64_t Demanded); }; - /// MaskedValueIsZero - Return true if 'Op & Mask' is known to be zero. We - /// use this predicate to simplify operations downstream. Op and Mask are - /// known to be the same type. - bool MaskedValueIsZero(SDOperand Op, uint64_t Mask, unsigned Depth = 0) -const; - - /// ComputeMaskedBits - Determine which of the bits specified in Mask are - /// known to be either zero or one and return them in the KnownZero/KnownOne - /// bitsets. This code only analyzes bits in Mask, in order to short-circuit - /// processing. Targets can implement the computeMaskedBitsForTargetNode - /// method, to allow target nodes to be understood. - void ComputeMaskedBits(SDOperand Op, uint64_t Mask, uint64_t &KnownZero, - uint64_t &KnownOne, unsigned Depth = 0) const; - /// SimplifyDemandedBits - Look at Op. At this point, we know that only the /// DemandedMask bits of the result of Op are ever used downstream. 
If we can /// use this information to simplify Op, create a new simplified DAG node and @@ -527,15 +513,9 @@ uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, + const SelectionDAG &DAG, unsigned Depth = 0) const; - /// ComputeNumSignBits - Return the number of times the sign bit of the - /// register is replicated into the other bits. We know that at least 1 bit - /// is always equal to the sign bit (itself), but other cases can give us - /// information. For example, immediately after an "SRA X, 2", we know that - /// the top 3 bits are all equal to each other, so we return 3. - unsigned ComputeNumSignBits(SDOperand Op, unsigned Depth = 0) const; - /// ComputeNumSignBitsForTargetNode - This method can be implemented by /// targets that want to expose additional information about sign bits to the /// DAG Combiner. ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
Changes in directory llvm/lib/Target/Alpha: AlphaISelDAGToDAG.cpp updated: 1.70 -> 1.71 --- Log message: Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from TargetLowering to SelectionDAG so that they have more convenient access to the current DAG, in preparation for the ValueType routines being changed from standalone functions to members of SelectionDAG for the pre-legalize vector type changes. --- Diffs of the changes: (+1 -1) AlphaISelDAGToDAG.cpp |2 +- 1 files changed, 1 insertion(+), 1 deletion(-) Index: llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp diff -u llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp:1.70 llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp:1.71 --- llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp:1.70 Tue Jun 19 09:13:56 2007 +++ llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp Fri Jun 22 09:59:07 2007 @@ -90,7 +90,7 @@ // see if the missing bits (0x1000) are already known zero if not, the zap // isn't okay to do, as it won't clear all the required bits. if (BitsToCheck && - !getTargetLowering().MaskedValueIsZero(LHS, BitsToCheck)) + !CurDAG->MaskedValueIsZero(LHS, BitsToCheck)) return 0; return Result; ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
Re: [llvm-commits] CVS: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp LegalizeDAG.cpp SelectionDAG.cpp SelectionDAGISel.cpp TargetLowering.cpp
On Jun 22, 2007, at 7:59 AM, Dan Gohman wrote: > Move ComputeMaskedBits, MaskedValueIsZero, and ComputeNumSignBits from > TargetLowering to SelectionDAG so that they have more convenient > access to the current DAG, in preparation for the ValueType routines > being changed from standalone functions to members of SelectionDAG for > the pre-legalize vector type changes. Ah, that's also a much more logical place for them anyway. They originally started out in DAGCombine, this is their third home :). Thanks Dan, -Chris ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Target/ARM/ARMInstrInfo.td
Changes in directory llvm/lib/Target/ARM: ARMInstrInfo.td updated: 1.113 -> 1.114 --- Log message: Fix the build. --- Diffs of the changes: (+1 -1) ARMInstrInfo.td |2 +- 1 files changed, 1 insertion(+), 1 deletion(-) Index: llvm/lib/Target/ARM/ARMInstrInfo.td diff -u llvm/lib/Target/ARM/ARMInstrInfo.td:1.113 llvm/lib/Target/ARM/ARMInstrInfo.td:1.114 --- llvm/lib/Target/ARM/ARMInstrInfo.td:1.113 Tue Jun 19 16:05:09 2007 +++ llvm/lib/Target/ARM/ARMInstrInfo.td Fri Jun 22 11:59:54 2007 @@ -152,7 +152,7 @@ // sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits. def sext_16_node : PatLeaf<(i32 GPR:$a), [{ - return TLI.ComputeNumSignBits(SDOperand(N,0)) >= 17; + return CurDAG->ComputeNumSignBits(SDOperand(N,0)) >= 17; }]>; ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Transforms/Scalar/GVNPRE.cpp
Changes in directory llvm/lib/Transforms/Scalar: GVNPRE.cpp updated: 1.52 -> 1.53 --- Log message: Remove some code that I was using for collecting performance information that should not have been committed. --- Diffs of the changes: (+18 -18) GVNPRE.cpp | 36 ++-- 1 files changed, 18 insertions(+), 18 deletions(-) Index: llvm/lib/Transforms/Scalar/GVNPRE.cpp diff -u llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.52 llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.53 --- llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.52 Thu Jun 21 22:14:03 2007 +++ llvm/lib/Transforms/Scalar/GVNPRE.cpp Fri Jun 22 12:04:40 2007 @@ -367,47 +367,47 @@ // Helper fuctions // FIXME: eliminate or document these better -void dump(const SmallPtrSet& s) const __attribute__((noinline)); -void clean(SmallPtrSet& set) __attribute__((noinline)); +void dump(const SmallPtrSet& s) const; +void clean(SmallPtrSet& set); Value* find_leader(SmallPtrSet& vals, - uint32_t v) __attribute__((noinline)); -Value* phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) __attribute__((noinline)); + uint32_t v); +Value* phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ); void phi_translate_set(SmallPtrSet& anticIn, BasicBlock* pred, - BasicBlock* succ, SmallPtrSet& out) __attribute__((noinline)); + BasicBlock* succ, SmallPtrSet& out); void topo_sort(SmallPtrSet& set, - std::vector& vec) __attribute__((noinline)); + std::vector& vec); -void cleanup() __attribute__((noinline)); -bool elimination() __attribute__((noinline)); +void cleanup(); +bool elimination(); -void val_insert(SmallPtrSet& s, Value* v) __attribute__((noinline)); -void val_replace(SmallPtrSet& s, Value* v) __attribute__((noinline)); -bool dependsOnInvoke(Value* V) __attribute__((noinline)); +void val_insert(SmallPtrSet& s, Value* v); +void val_replace(SmallPtrSet& s, Value* v); +bool dependsOnInvoke(Value* V); void buildsets_availout(BasicBlock::iterator I, SmallPtrSet& currAvail, SmallPtrSet& currPhis, SmallPtrSet& currExps, SmallPtrSet& currTemps, 
BitVector& availNumbers, -BitVector& expNumbers) __attribute__((noinline)); +BitVector& expNumbers); bool buildsets_anticout(BasicBlock* BB, SmallPtrSet& anticOut, -std::set& visited) __attribute__((noinline)); +std::set& visited); unsigned buildsets_anticin(BasicBlock* BB, SmallPtrSet& anticOut, SmallPtrSet& currExps, SmallPtrSet& currTemps, - std::set& visited) __attribute__((noinline)); -unsigned buildsets(Function& F) __attribute__((noinline)); + std::set& visited); +unsigned buildsets(Function& F); void insertion_pre(Value* e, BasicBlock* BB, std::map& avail, - SmallPtrSet& new_set) __attribute__((noinline)); + SmallPtrSet& new_set); unsigned insertion_mergepoint(std::vector& workList, df_iterator& D, - SmallPtrSet& new_set) __attribute__((noinline)); -bool insertion(Function& F) __attribute__((noinline)); + SmallPtrSet& new_set); +bool insertion(Function& F); }; ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
Re: [llvm-commits] CVS: llvm/lib/Target/X86/X86InstrFPStack.td X86InstrInfo.cpp X86InstrInfo.h X86InstrInfo.td X86InstrMMX.td X86InstrSSE.td
>> How about this proposal (Obviously feel free to pick better names for >> these things): >> >> 1. Reintroduce the 'isremat-able' flag, set it to true for all the >> instructions that are *potentially* rematerializable. >> 2. Add a virtual target hook that can override the flag: >> "TII::isReallyRematerializable(Machineinstr*)". >> 3. Introduce a new non-virtual method: >>bool TII::isRematerializable(Machineinstr *MI) { >> return MI->flags->isrematable && isReallyRematerializable(MI); >>} > > I tried this, and got circular dependencies between libLLVMAnalysis.a, > libLLVMTarget.a, and libLLVMCodeGen.a. I think it's because the actual > code for 3. uses MachineInstr::getOpcode. It shouldn't need to, it can be an inline function that calls: MI->getInstrDescriptor()->isrematable() MachineInstrs have a direct pointer to their TargetInstrDescriptor record. They actually don't hold their opcode :) >> I'm sorry I didn't look at your patch when you asked for comments, >> but does this proposal sound sane? > > *shrug*. Adding isReMaterializable flags to all the load > instructions in > the X86 files isn't unambiguously prettier though. But I've already > strayed from my tangent here :-}. True, in the future we can add some smarts to tblgen... we already know what the loads are (from the patterns), so tblgen could do this automatically for every target. Unfortunately, the tblgen code that interprets the patterns is built into the DAGISelEmitter.cpp file. Someday we should refactor the code for interpreting the patterns out from the code that emits the isel. That way, other tblgen backends could use the patterns to autogenerate things (e.g. the "fold load into instruction" code). -Chris ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Transforms/Scalar/GVNPRE.cpp
Changes in directory llvm/lib/Transforms/Scalar: GVNPRE.cpp updated: 1.53 -> 1.54 --- Log message: Perform fewer set insertions while calculating ANTIC_IN. This reduces the amount of time to optimize 403.gcc from 21.9s to 18.2s. --- Diffs of the changes: (+31 -40) GVNPRE.cpp | 71 ++--- 1 files changed, 31 insertions(+), 40 deletions(-) Index: llvm/lib/Transforms/Scalar/GVNPRE.cpp diff -u llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.53 llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.54 --- llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.53 Fri Jun 22 12:04:40 2007 +++ llvm/lib/Transforms/Scalar/GVNPRE.cpp Fri Jun 22 13:27:04 2007 @@ -818,20 +818,20 @@ availNumbers.resize(VN.size()); if (isa(leftValue)) - if (!expNumbers.test(VN.lookup(leftValue)-1)) { + if (!expNumbers.test(VN.lookup(leftValue))) { currExps.insert(leftValue); -expNumbers.set(VN.lookup(leftValue)-1); +expNumbers.set(VN.lookup(leftValue)); } if (isa(rightValue)) - if (!expNumbers.test(VN.lookup(rightValue)-1)) { + if (!expNumbers.test(VN.lookup(rightValue))) { currExps.insert(rightValue); -expNumbers.set(VN.lookup(rightValue)-1); +expNumbers.set(VN.lookup(rightValue)); } -if (!expNumbers.test(VN.lookup(BO)-1)) { +if (!expNumbers.test(VN.lookup(BO))) { currExps.insert(BO); - expNumbers.set(num-1); + expNumbers.set(num); } // Handle cmp ops... 
@@ -846,19 +846,19 @@ availNumbers.resize(VN.size()); if (isa(leftValue)) - if (!expNumbers.test(VN.lookup(leftValue)-1)) { + if (!expNumbers.test(VN.lookup(leftValue))) { currExps.insert(leftValue); -expNumbers.set(VN.lookup(leftValue)-1); +expNumbers.set(VN.lookup(leftValue)); } if (isa(rightValue)) - if (!expNumbers.test(VN.lookup(rightValue)-1)) { + if (!expNumbers.test(VN.lookup(rightValue))) { currExps.insert(rightValue); -expNumbers.set(VN.lookup(rightValue)-1); +expNumbers.set(VN.lookup(rightValue)); } -if (!expNumbers.test(VN.lookup(C)-1)) { +if (!expNumbers.test(VN.lookup(C))) { currExps.insert(C); - expNumbers.set(num-1); + expNumbers.set(num); } // Handle unsupported ops @@ -871,9 +871,9 @@ } if (!I->isTerminator()) -if (!availNumbers.test(VN.lookup(I)-1)) { +if (!availNumbers.test(VN.lookup(I))) { currAvail.insert(I); - availNumbers.set(VN.lookup(I)-1); + availNumbers.set(VN.lookup(I)); } } @@ -921,45 +921,36 @@ SmallPtrSet& currTemps, std::set& visited) { SmallPtrSet& anticIn = anticipatedIn[BB]; - SmallPtrSet old (anticIn.begin(), anticIn.end()); + unsigned old = anticIn.size(); bool defer = buildsets_anticout(BB, anticOut, visited); if (defer) return 0; - - SmallPtrSet S; - for (SmallPtrSet::iterator I = anticOut.begin(), - E = anticOut.end(); I != E; ++I) -if (currTemps.count(*I) == 0) - S.insert(*I); anticIn.clear(); + BitVector numbers(VN.size()); + for (SmallPtrSet::iterator I = anticOut.begin(), + E = anticOut.end(); I != E; ++I) { +anticIn.insert(*I); +numbers.set(VN.lookup_or_add(*I)); + } for (SmallPtrSet::iterator I = currExps.begin(), - E = currExps.end(); I != E; ++I) -if (currTemps.count(*I) == 0) + E = currExps.end(); I != E; ++I) { +if (!numbers.test(VN.lookup_or_add(*I))) { anticIn.insert(*I); + numbers.set(VN.lookup(*I)); +} + } + + for (SmallPtrSet::iterator I = currTemps.begin(), + E = currTemps.end(); I != E; ++I) +anticIn.erase(*I); - BitVector numbers(VN.size()); - for (SmallPtrSet::iterator I = anticIn.begin(), - E = 
anticIn.end(); I != E; ++I) -numbers.set(VN.lookup(*I)-1); - for (SmallPtrSet::iterator I = S.begin(), E = S.end(); - I != E; ++I) { -// For non-opaque values, we should already have a value numbering. -// However, for opaques, such as constants within PHI nodes, it is -// possible that they have not yet received a number. Make sure they do -// so now. -if (!isa(*I) && !isa(*I)) - VN.lookup_or_add(*I); -if (!numbers.test(VN.lookup(*I)-1)) - anticIn.insert(*I); - } - clean(anticIn); anticOut.clear(); - if (old.size() != anticIn.size()) + if (old != anticIn.size()) return 2; else return 1; ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] [128742] Fix warning.
Revision: 128742 Author: dpatel Date: 2007-06-22 13:28:02 -0700 (Fri, 22 Jun 2007) Log Message: --- Fix warning. Modified Paths: -- apple-local/branches/llvm/gcc/c-lex.c Modified: apple-local/branches/llvm/gcc/c-lex.c === --- apple-local/branches/llvm/gcc/c-lex.c 2007-06-22 17:28:14 UTC (rev 128741) +++ apple-local/branches/llvm/gcc/c-lex.c 2007-06-22 20:28:02 UTC (rev 128742) @@ -826,7 +826,7 @@ my_cpp_num_sign_extend (cpp_num num, size_t precision) { if (num.high) -printf("%lu\n", num.high); +printf("%lu\n", (long unsigned int) num.high); if (!num.unsignedp) { if (precision > PART_PRECISION) ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
[llvm-commits] CVS: llvm/lib/Transforms/Scalar/GVNPRE.cpp
Changes in directory llvm/lib/Transforms/Scalar: GVNPRE.cpp updated: 1.54 -> 1.55 --- Log message: Rework topo_sort so eliminate some behavior that scaled terribly. This reduces the time to optimize 403.gcc from 18.2s to 17.5s, and has an even larger effect on larger testcases. --- Diffs of the changes: (+40 -57) GVNPRE.cpp | 97 + 1 files changed, 40 insertions(+), 57 deletions(-) Index: llvm/lib/Transforms/Scalar/GVNPRE.cpp diff -u llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.54 llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.55 --- llvm/lib/Transforms/Scalar/GVNPRE.cpp:1.54 Fri Jun 22 13:27:04 2007 +++ llvm/lib/Transforms/Scalar/GVNPRE.cpp Fri Jun 22 16:31:16 2007 @@ -650,71 +650,54 @@ /// topo_sort - Given a set of values, sort them by topological /// order into the provided vector. void GVNPRE::topo_sort(SmallPtrSet& set, std::vector& vec) { - SmallPtrSet toErase; + SmallPtrSet visited; + std::vector stack; for (SmallPtrSet::iterator I = set.begin(), E = set.end(); I != E; ++I) { -if (BinaryOperator* BO = dyn_cast(*I)) - for (SmallPtrSet::iterator SI = set.begin(); SI != E; ++SI) { -if (VN.lookup(BO->getOperand(0)) == VN.lookup(*SI) || -VN.lookup(BO->getOperand(1)) == VN.lookup(*SI)) { - toErase.insert(*SI); +if (visited.count(*I) == 0) + stack.push_back(*I); + +while (!stack.empty()) { + Value* e = stack.back(); + + if (BinaryOperator* BO = dyn_cast(e)) { +Value* l = find_leader(set, VN.lookup(BO->getOperand(0))); +Value* r = find_leader(set, VN.lookup(BO->getOperand(1))); + +if (l != 0 && isa(l) && +visited.count(l) == 0) + stack.push_back(l); +else if (r != 0 && isa(r) && + visited.count(r) == 0) + stack.push_back(r); +else { + vec.push_back(e); + visited.insert(e); + stack.pop_back(); } - } -else if (CmpInst* C = dyn_cast(*I)) - for (SmallPtrSet::iterator SI = set.begin(); SI != E; ++SI) { -if (VN.lookup(C->getOperand(0)) == VN.lookup(*SI) || -VN.lookup(C->getOperand(1)) == VN.lookup(*SI)) { - toErase.insert(*SI); + } else if (CmpInst* C = dyn_cast(e)) { +Value* l = 
find_leader(set, VN.lookup(C->getOperand(0))); +Value* r = find_leader(set, VN.lookup(C->getOperand(1))); + +if (l != 0 && isa(l) && +visited.count(l) == 0) + stack.push_back(l); +else if (r != 0 && isa(r) && + visited.count(r) == 0) + stack.push_back(r); +else { + vec.push_back(e); + visited.insert(e); + stack.pop_back(); } - } - } - - std::vector Q; - for (SmallPtrSet::iterator I = set.begin(), E = set.end(); - I != E; ++I) { -if (toErase.count(*I) == 0) - Q.push_back(*I); - } - - SmallPtrSet visited; - while (!Q.empty()) { -Value* e = Q.back(); - -if (BinaryOperator* BO = dyn_cast(e)) { - Value* l = find_leader(set, VN.lookup(BO->getOperand(0))); - Value* r = find_leader(set, VN.lookup(BO->getOperand(1))); - - if (l != 0 && isa(l) && - visited.count(l) == 0) -Q.push_back(l); - else if (r != 0 && isa(r) && - visited.count(r) == 0) -Q.push_back(r); - else { -vec.push_back(e); + } else { visited.insert(e); -Q.pop_back(); - } -} else if (CmpInst* C = dyn_cast(e)) { - Value* l = find_leader(set, VN.lookup(C->getOperand(0))); - Value* r = find_leader(set, VN.lookup(C->getOperand(1))); - - if (l != 0 && isa(l) && - visited.count(l) == 0) -Q.push_back(l); - else if (r != 0 && isa(r) && - visited.count(r) == 0) -Q.push_back(r); - else { vec.push_back(e); -visited.insert(e); -Q.pop_back(); +stack.pop_back(); } -} else { - visited.insert(e); - vec.push_back(e); - Q.pop_back(); } + +stack.clear(); } } ___ llvm-commits mailing list llvm-commits@cs.uiuc.edu http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits