[llvm-commits] CVS: llvm/test/Regression/Transforms/InstCombine/2006-05-06-Infloop.ll

2006-05-06 Thread Chris Lattner


Changes in directory llvm/test/Regression/Transforms/InstCombine:

2006-05-06-Infloop.ll added (r1.1)
---
Log message:

new testcase from ghostscript that caused instcombine to infinite-loop


---
Diffs of the changes:  (+522 -0)

 2006-05-06-Infloop.ll |  522 ++
 1 files changed, 522 insertions(+)


Index: llvm/test/Regression/Transforms/InstCombine/2006-05-06-Infloop.ll
diff -c /dev/null 
llvm/test/Regression/Transforms/InstCombine/2006-05-06-Infloop.ll:1.1
*** /dev/null   Sat May  6 03:58:16 2006
--- llvm/test/Regression/Transforms/InstCombine/2006-05-06-Infloop.ll   Sat May 
 6 03:58:06 2006
***
*** 0 
--- 1,522 
+ ; RUN: llvm-as < %s | opt -instcombine -disable-output
+ 
+   %struct.gs_matrix = type { float, int, float, int, float, int, float, 
int, float, int, float, int }
+   %struct.gx_bitmap = type { ubyte*, int, int, int }
+   %struct.gx_device = type { int, %struct.gx_device_procs*, sbyte*, int, 
int, float, float, int, ushort, int, int }
+   %struct.gx_device_memory = type { int, %struct.gx_device_procs*, 
sbyte*, int, int, float, float, int, ushort, int, int, %struct.gs_matrix, int, 
ubyte*, ubyte**, int (%struct.gx_device_memory*, int, int, int, int, int)*, 
int, int, ubyte* }
+   %struct.gx_device_procs = type { int (%struct.gx_device*)*, void 
(%struct.gx_device*, %struct.gs_matrix*)*, int (%struct.gx_device*)*, int 
(%struct.gx_device*)*, int (%struct.gx_device*)*, uint (%struct.gx_device*, 
ushort, ushort, ushort)*, int (%struct.gx_device*, uint, ushort*)*, int 
(%struct.gx_device*, int, int, int, int, uint)*, int (%struct.gx_device*, 
%struct.gx_bitmap*, int, int, int, int, uint, uint)*, int (%struct.gx_device*, 
ubyte*, int, int, int, int, int, int, uint, uint)*, int (%struct.gx_device*, 
ubyte*, int, int, int, int, int, int)*, int (%struct.gx_device*, int, int, int, 
int, uint)*, int (%struct.gx_device*, int, int, int, int, int, int, uint)*, int 
(%struct.gx_device*, %struct.gx_bitmap*, int, int, int, int, int, int, uint, 
uint)* }
+ 
+ implementation   ; Functions:
+ 
+ int %mem_mono_copy_mono(%struct.gx_device* %dev, ubyte* %base, int %sourcex, 
int %raster, int %x, int %y, int %w, int %h, uint %zero, uint %one) {
+ entry:
+   %raster = cast int %raster to uint  ;  [#uses=3]
+   %tmp = seteq uint %one, %zero   ;  [#uses=1]
+   br bool %tmp, label %cond_true, label %cond_next
+ 
+ cond_true:; preds = %entry
+   %tmp6 = tail call int %mem_mono_fill_rectangle( %struct.gx_device* 
%dev, int %x, int %y, int %w, int %h, uint %zero )   ;  [#uses=1]
+   ret int %tmp6
+ 
+ cond_next:; preds = %entry
+   %tmp8 = cast %struct.gx_device* %dev to %struct.gx_device_memory*   
; <%struct.gx_device_memory*> [#uses=6]
+   %tmp = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 15
;  [#uses=1]
+   %tmp = load int (%struct.gx_device_memory*, int, int, int, int, int)** 
%tmp ;  
[#uses=2]
+   %tmp9 = seteq int (%struct.gx_device_memory*, int, int, int, int, int)* 
%tmp, %mem_no_fault_proc;  [#uses=1]
+   br bool %tmp9, label %cond_next46, label %cond_true10
+ 
+ cond_true10:  ; preds = %cond_next
+   %tmp16 = add int %x, 7  ;  [#uses=1]
+   %tmp17 = add int %tmp16, %w ;  [#uses=1]
+   %tmp18 = shr int %tmp17, ubyte 3;  [#uses=1]
+   %tmp20 = shr int %x, ubyte 3;  [#uses=2]
+   %tmp21 = sub int %tmp18, %tmp20 ;  [#uses=1]
+   %tmp27 = tail call int %tmp( %struct.gx_device_memory* %tmp8, int 
%tmp20, int %y, int %tmp21, int %h, int 1 )   ;  [#uses=2]
+   %tmp29 = setlt int %tmp27, 0;  [#uses=1]
+   br bool %tmp29, label %cond_true30, label %cond_next46
+ 
+ cond_true30:  ; preds = %cond_true10
+   %tmp41 = tail call int %mem_copy_mono_recover( %struct.gx_device* %dev, 
ubyte* %base, int %sourcex, int %raster, int %x, int %y, int %w, int %h, uint 
%zero, uint %one, int %tmp27 );  [#uses=1]
+   ret int %tmp41
+ 
+ cond_next46:  ; preds = %cond_true10, %cond_next
+   %tmp48 = setgt int %w, 0;  [#uses=1]
+   %tmp53 = setgt int %h, 0;  [#uses=1]
+   %bothcond = and bool %tmp53, %tmp48 ;  [#uses=1]
+   br bool %bothcond, label %bb58, label %return
+ 
+ bb58: ; preds = %cond_next46
+   %tmp60 = setlt int %x, 0;  [#uses=1]
+   br bool %tmp60, label %return, label %cond_next63
+ 
+ cond_next63:  ; preds = %bb58
+   %tmp65 = getelementptr %struct.gx_device_memory* %tmp8, int 0, uint 3   
;  [#uses=1]
+   %tmp66 = load int* %tmp65   ;  [#uses=1]
+   %tmp68 = sub int %tmp66, %w ;  [#uses=1]
+   %tmp70 = setlt int %tmp68, %x   ;  [#uses=1]
+   %tmp75 = setlt int %y, 0; 

[llvm-commits] CVS: llvm/lib/Transforms/Scalar/InstructionCombining.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/Transforms/Scalar:

InstructionCombining.cpp updated: 1.476 -> 1.477
---
Log message:

Move some code around.

Make the "fold (and (cast A), (cast B)) -> (cast (and A, B))" transformation
only apply when both casts really will cause code to be generated.  If one or
both of the casts don't, then this xform doesn't remove a cast.

This fixes Transforms/InstCombine/2006-05-06-Infloop.ll


---
Diffs of the changes:  (+140 -124)

 InstructionCombining.cpp |  264 ---
 1 files changed, 140 insertions(+), 124 deletions(-)


Index: llvm/lib/Transforms/Scalar/InstructionCombining.cpp
diff -u llvm/lib/Transforms/Scalar/InstructionCombining.cpp:1.476 
llvm/lib/Transforms/Scalar/InstructionCombining.cpp:1.477
--- llvm/lib/Transforms/Scalar/InstructionCombining.cpp:1.476   Fri May  5 
15:51:30 2006
+++ llvm/lib/Transforms/Scalar/InstructionCombining.cpp Sat May  6 04:00:16 2006
@@ -304,6 +304,137 @@
   return 0;
 }
 
+enum CastType {
+  Noop = 0,
+  Truncate = 1,
+  Signext  = 2,
+  Zeroext  = 3
+};
+
+/// getCastType - In the future, we will split the cast instruction into these
+/// various types.  Until then, we have to do the analysis here.
+static CastType getCastType(const Type *Src, const Type *Dest) {
+  assert(Src->isIntegral() && Dest->isIntegral() &&
+ "Only works on integral types!");
+  unsigned SrcSize = Src->getPrimitiveSizeInBits();
+  unsigned DestSize = Dest->getPrimitiveSizeInBits();
+  
+  if (SrcSize == DestSize) return Noop;
+  if (SrcSize > DestSize)  return Truncate;
+  if (Src->isSigned()) return Signext;
+  return Zeroext;
+}
+
+
+// isEliminableCastOfCast - Return true if it is valid to eliminate the CI
+// instruction.
+//
+static bool isEliminableCastOfCast(const Type *SrcTy, const Type *MidTy,
+   const Type *DstTy, TargetData *TD) {
+  
+  // It is legal to eliminate the instruction if casting A->B->A if the sizes
+  // are identical and the bits don't get reinterpreted (for example
+  // int->float->int would not be allowed).
+  if (SrcTy == DstTy && SrcTy->isLosslesslyConvertibleTo(MidTy))
+return true;
+  
+  // If we are casting between pointer and integer types, treat pointers as
+  // integers of the appropriate size for the code below.
+  if (isa(SrcTy)) SrcTy = TD->getIntPtrType();
+  if (isa(MidTy)) MidTy = TD->getIntPtrType();
+  if (isa(DstTy)) DstTy = TD->getIntPtrType();
+  
+  // Allow free casting and conversion of sizes as long as the sign doesn't
+  // change...
+  if (SrcTy->isIntegral() && MidTy->isIntegral() && DstTy->isIntegral()) {
+CastType FirstCast = getCastType(SrcTy, MidTy);
+CastType SecondCast = getCastType(MidTy, DstTy);
+
+// Capture the effect of these two casts.  If the result is a legal cast,
+// the CastType is stored here, otherwise a special code is used.
+static const unsigned CastResult[] = {
+  // First cast is noop
+  0, 1, 2, 3,
+  // First cast is a truncate
+  1, 1, 4, 4, // trunc->extend is not safe to eliminate
+  // First cast is a sign ext
+  2, 5, 2, 4, // signext->zeroext never ok
+  // First cast is a zero ext
+  3, 5, 3, 3,
+};
+
+unsigned Result = CastResult[FirstCast*4+SecondCast];
+switch (Result) {
+default: assert(0 && "Illegal table value!");
+case 0:
+case 1:
+case 2:
+case 3:
+  // FIXME: in the future, when LLVM has explicit sign/zeroextends and
+  // truncates, we could eliminate more casts.
+  return (unsigned)getCastType(SrcTy, DstTy) == Result;
+case 4:
+  return false;  // Not possible to eliminate this here.
+case 5:
+  // Sign or zero extend followed by truncate is always ok if the result
+  // is a truncate or noop.
+  CastType ResultCast = getCastType(SrcTy, DstTy);
+  if (ResultCast == Noop || ResultCast == Truncate)
+return true;
+// Otherwise we are still growing the value, we are only safe if the
+// result will match the sign/zeroextendness of the result.
+return ResultCast == FirstCast;
+}
+  }
+  
+  // If this is a cast from 'float -> double -> integer', cast from
+  // 'float -> integer' directly, as the value isn't changed by the 
+  // float->double conversion.
+  if (SrcTy->isFloatingPoint() && MidTy->isFloatingPoint() &&
+  DstTy->isIntegral() && 
+  SrcTy->getPrimitiveSize() < MidTy->getPrimitiveSize())
+return true;
+  
+  // Packed type conversions don't modify bits.
+  if (isa(SrcTy) && isa(MidTy) 
&&isa(DstTy))
+return true;
+  
+  return false;
+}
+
+/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
+/// in any code being generated.  It does not require codegen if V is simple
+/// enough or if the cast can be folded into other casts.
+static bool ValueRequiresCast(const Value *V, const Type *Ty, TargetData *TD) {
+  if (V->get

[llvm-commits] CVS: llvm/test/Regression/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll

2006-05-06 Thread Chris Lattner


Changes in directory llvm/test/Regression/CodeGen/Generic:

2006-05-06-GEP-Cast-Sink-Crash.ll added (r1.1)
---
Log message:

new testcase


---
Diffs of the changes:  (+33 -0)

 2006-05-06-GEP-Cast-Sink-Crash.ll |   33 +
 1 files changed, 33 insertions(+)


Index: llvm/test/Regression/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll
diff -c /dev/null 
llvm/test/Regression/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll:1.1
*** /dev/null   Sat May  6 04:09:57 2006
--- llvm/test/Regression/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll  
Sat May  6 04:09:47 2006
***
*** 0 
--- 1,33 
+ ; RUN: llvm-as < %s | llc
+ 
+%struct.FILE = type { ubyte*, int, int, short, short, %struct.__sbuf, 
int, sbyte*, int (sbyte*)*, int (sbyte*, sbyte*, int)*, long (sbyte*, long, 
int)*, int (sbyte*, sbyte*, int)*, %struct.__sbuf, %struct.__sFILEX*, int, [3 x 
ubyte], [1 x ubyte], %struct.__sbuf, int, long }
+ %struct.SYMBOL_TABLE_ENTRY = type { [9 x sbyte], [9 x sbyte], int, 
int, uint, %struct.SYMBOL_TABLE_ENTRY* }
+ %struct.__sFILEX = type opaque
+ %struct.__sbuf = type { ubyte*, int }
+ %str14 = external global [6 x sbyte]; <[6 x sbyte]*> [#uses=0]
+ 
+ implementation   ; Functions:
+ 
+ declare void %fprintf(int, ...)
+ 
+ void %OUTPUT_TABLE(%struct.SYMBOL_TABLE_ENTRY* %SYM_TAB) {
+ entry:
+ %tmp11 = getelementptr %struct.SYMBOL_TABLE_ENTRY* %SYM_TAB, int 0, 
uint 1, int 0   ;  [#uses=2]
+ %tmp.i = cast sbyte* %tmp11 to ubyte*   ;  [#uses=1]
+ br label %bb.i
+ 
+ bb.i:   ; preds = %cond_next.i, %entry
+ %s1.0.i = phi ubyte* [ %tmp.i, %entry ], [ null, %cond_next.i ]   
  ;  [#uses=0]
+ br bool false, label %cond_true.i31, label %cond_next.i
+ 
+ cond_true.i31:  ; preds = %bb.i
+ call void (int, ...)* %fprintf( int 0, sbyte* %tmp11, sbyte* null )
+ ret void
+ 
+ cond_next.i:; preds = %bb.i
+ br bool false, label %bb.i, label %bb19.i
+ 
+ bb19.i: ; preds = %cond_next.i
+ ret void
+ }
+ 



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/CodeGen/SelectionDAG:

SelectionDAGISel.cpp updated: 1.235 -> 1.236
---
Log message:

When inserting casts, be careful of where we put them.  We cannot insert
a cast immediately before a PHI node.

This fixes Regression/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll


---
Diffs of the changes:  (+12 -9)

 SelectionDAGISel.cpp |   21 -
 1 files changed, 12 insertions(+), 9 deletions(-)


Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
diff -u llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp:1.235 
llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp:1.236
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp:1.235Fri May  5 
16:17:49 2006
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  Sat May  6 04:10:37 2006
@@ -2789,8 +2789,9 @@
 
 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
 /// casting to the type of GEPI.
-static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction 
*GEPI,
-   Value *Ptr, Value *PtrOffset) {
+static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
+ Instruction *GEPI, Value *Ptr,
+ Value *PtrOffset) {
   if (V) return V;   // Already computed.
   
   BasicBlock::iterator InsertPt;
@@ -2813,8 +2814,7 @@
   
   // Add the offset, cast it to the right type.
   Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
-  Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
-  return V = Ptr;
+  return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
 }
 
 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
@@ -2827,7 +2827,7 @@
 static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr, 
  Constant *PtrOffset, BasicBlock *DefBB,
  GetElementPtrInst *GEPI,
- std::map &InsertedExprs) {
+   std::map &InsertedExprs) {
   while (!RepPtr->use_empty()) {
 Instruction *User = cast(RepPtr->use_back());
 
@@ -2843,7 +2843,7 @@
 
 // If this is a load of the pointer, or a store through the pointer, emit
 // the increment into the load/store block.
-Value *NewVal;
+Instruction *NewVal;
 if (isa(User) ||
 (isa(User) && User->getOperand(0) != RepPtr)) {
   NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()], 
@@ -2856,8 +2856,11 @@
 Ptr, PtrOffset);
 }
 
-if (GEPI->getType() != RepPtr->getType())
-  NewVal = new CastInst(NewVal, RepPtr->getType(), "", User);
+if (GEPI->getType() != RepPtr->getType()) {
+  BasicBlock::iterator IP = NewVal;
+  ++IP;
+  NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
+}
 User->replaceUsesOfWith(RepPtr, NewVal);
   }
 }
@@ -2970,7 +2973,7 @@
   // block, otherwise we use a canonical version right next to the gep (these 
   // won't be foldable as addresses, so we might as well share the 
computation).
   
-  std::map InsertedExprs;
+  std::map InsertedExprs;
   ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
   
   // Finally, the GEP is dead, remove it.



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/include/llvm/Target/TargetLowering.h

2006-05-06 Thread Chris Lattner


Changes in directory llvm/include/llvm/Target:

TargetLowering.h updated: 1.70 -> 1.71
---
Log message:

Add some new methods for computing sign bit information.


---
Diffs of the changes:  (+13 -0)

 TargetLowering.h |   13 +
 1 files changed, 13 insertions(+)


Index: llvm/include/llvm/Target/TargetLowering.h
diff -u llvm/include/llvm/Target/TargetLowering.h:1.70 
llvm/include/llvm/Target/TargetLowering.h:1.71
--- llvm/include/llvm/Target/TargetLowering.h:1.70  Tue May  2 20:29:56 2006
+++ llvm/include/llvm/Target/TargetLowering.h   Sat May  6 04:26:22 2006
@@ -389,6 +389,19 @@
   uint64_t &KnownOne,
   unsigned Depth = 0) const;
 
+  /// ComputeNumSignBits - Return the number of times the sign bit of the
+  /// register is replicated into the other bits.  We know that at least 1 bit
+  /// is always equal to the sign bit (itself), but other cases can give us
+  /// information.  For example, immediately after an "SRA X, 2", we know that
+  /// the top 3 bits are all equal to each other, so we return 3.
+  unsigned ComputeNumSignBits(SDOperand Op, unsigned Depth = 0) const;
+  
+  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
+  /// targets that want to expose additional information about sign bits to the
+  /// DAG Combiner.
+  virtual unsigned ComputeNumSignBitsForTargetNode(SDOperand Op,
+   unsigned Depth = 0) const;
+  
   struct DAGCombinerInfo {
 void *DC;  // The DAG Combiner object.
 bool BeforeLegalize;



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/Target/TargetLowering.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/Target:

TargetLowering.cpp updated: 1.56 -> 1.57
---
Log message:

Add some really really simple code for computing sign-bit propagation.

This will certainly be enhanced in the future.


---
Diffs of the changes:  (+95 -0)

 TargetLowering.cpp |   95 +
 1 files changed, 95 insertions(+)


Index: llvm/lib/Target/TargetLowering.cpp
diff -u llvm/lib/Target/TargetLowering.cpp:1.56 
llvm/lib/Target/TargetLowering.cpp:1.57
--- llvm/lib/Target/TargetLowering.cpp:1.56 Fri May  5 19:11:52 2006
+++ llvm/lib/Target/TargetLowering.cpp  Sat May  6 04:27:13 2006
@@ -995,6 +995,101 @@
   KnownOne = 0;
 }
 
+/// ComputeNumSignBits - Return the number of times the sign bit of the
+/// register is replicated into the other bits.  We know that at least 1 bit
+/// is always equal to the sign bit (itself), but other cases can give us
+/// information.  For example, immediately after an "SRA X, 2", we know that
+/// the top 3 bits are all equal to each other, so we return 3.
+unsigned TargetLowering::ComputeNumSignBits(SDOperand Op, unsigned Depth) 
const{
+  MVT::ValueType VT = Op.getValueType();
+  assert(MVT::isInteger(VT) && "Invalid VT!");
+  unsigned VTBits = MVT::getSizeInBits(VT);
+  unsigned Tmp, Tmp2;
+  
+  if (Depth == 6)
+return 1;  // Limit search depth.
+
+  switch (Op.getOpcode()) {
+  default: 
+// Allow the target to implement this method for its nodes.
+if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
+  case ISD::INTRINSIC_WO_CHAIN:
+  case ISD::INTRINSIC_W_CHAIN:
+  case ISD::INTRINSIC_VOID:
+  unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
+  if (NumBits > 1) return NumBits;
+}
+
+// FIXME: Should use computemaskedbits to look at the top bits.
+return 1;
+
+  case ISD::AssertSext:
+Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT());
+return VTBits-Tmp+1;
+  case ISD::AssertZext:
+Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT());
+return VTBits-Tmp;
+
+  case ISD::SIGN_EXTEND_INREG:
+// Max of the input and what this extends.
+Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT());
+Tmp = VTBits-Tmp+1;
+
+Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+return std::max(Tmp, Tmp2);
+
+  case ISD::SRA:
+Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+// SRA X, C   -> adds C sign bits.
+if (ConstantSDNode *C = dyn_cast(Op.getOperand(1))) {
+  Tmp += C->getValue();
+  if (Tmp > VTBits) Tmp = VTBits;
+}
+return Tmp;
+
+  case ISD::ADD:
+  case ISD::SUB:
+// Add and sub can have at most one carry bit.  Thus we know that the 
output
+// is, at worst, one more bit than the inputs.
+Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+if (Tmp == 1) return 1;
+Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+if (Tmp2 == 1) return 1;
+return std::min(Tmp, Tmp2)-1;
+
+  //case ISD::ZEXTLOAD:   // 16 bits known
+  //case ISD::SEXTLOAD:   // 17 bits known
+  //case ISD::Constant:
+  //case ISD::SIGN_EXTEND:
+  //
+  }
+  
+#if 0
+  // fold (sext_in_reg (setcc x)) -> setcc x iff (setcc x) == 0 or -1
+  if (N0.getOpcode() == ISD::SETCC &&
+  TLI.getSetCCResultContents() == 
+  TargetLowering::ZeroOrNegativeOneSetCCResult)
+return N0;
+#endif
+}
+
+
+
+/// ComputeNumSignBitsForTargetNode - This method can be implemented by
+/// targets that want to expose additional information about sign bits to the
+/// DAG Combiner.
+unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
+ unsigned Depth) const 
{
+  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
+  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
+  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+  Op.getOpcode() == ISD::INTRINSIC_VOID) &&
+ "Should use ComputeNumSignBits if you don't know whether Op"
+ " is a target node!");
+  return 1;
+}
+
+
 SDOperand TargetLowering::
 PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
   // Default implementation: no optimization.



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/CodeGen/SelectionDAG:

DAGCombiner.cpp updated: 1.159 -> 1.160
---
Log message:

Use the new TargetLowering::ComputeNumSignBits method to eliminate 
sign_extend_inreg operations.  Though ComputeNumSignBits is still rudimentary,
this is enough to compile this:

short test(short X, short x) {
  int Y = X+x;
  return (Y >> 1);
}
short test2(short X, short x) {
  int Y = (short)(X+x);
  return Y >> 1;
}

into:

_test:
add r2, r3, r4
srawi r3, r2, 1
blr
_test2:
add r2, r3, r4
extsh r2, r2
srawi r3, r2, 1
blr

instead of:

_test:
add r2, r3, r4
srawi r2, r2, 1
extsh r3, r2
blr
_test2:
add r2, r3, r4
extsh r2, r2
srawi r2, r2, 1
extsh r3, r2
blr




---
Diffs of the changes:  (+5 -5)

 DAGCombiner.cpp |   10 +-
 1 files changed, 5 insertions(+), 5 deletions(-)


Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
diff -u llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.159 
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.160
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.159 Fri May  5 17:56:26 2006
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp   Sat May  6 04:30:03 2006
@@ -1937,6 +1937,11 @@
 SDOperand Truncate = DAG.getConstant(N0C->getValue(), EVT);
 return DAG.getNode(ISD::SIGN_EXTEND, VT, Truncate);
   }
+  
+  // If the input is already sign extended, just drop the extend.
+  if (TLI.ComputeNumSignBits(N0) >= MVT::getSizeInBits(VT)-EVTBits+1)
+return N0;
+  
   // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt1
   if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 
   cast(N0.getOperand(1))->getVT() <= EVT) {
@@ -1947,11 +1952,6 @@
   EVT < cast(N0.getOperand(1))->getVT()) {
 return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0), N1);
   }
-  // fold (sext_in_reg (assert_sext x)) -> (assert_sext x)
-  if (N0.getOpcode() == ISD::AssertSext && 
-  cast(N0.getOperand(1))->getVT() <= EVT) {
-return N0;
-  }
   // fold (sext_in_reg (sextload x)) -> (sextload x)
   if (N0.getOpcode() == ISD::SEXTLOAD && 
   cast(N0.getOperand(3))->getVT() <= EVT) {



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/test/Regression/CodeGen/PowerPC/and_sext.ll

2006-05-06 Thread Chris Lattner


Changes in directory llvm/test/Regression/CodeGen/PowerPC:

and_sext.ll updated: 1.1 -> 1.2
---
Log message:

new testcase that we now handle correctly.


---
Diffs of the changes:  (+20 -6)

 and_sext.ll |   26 --
 1 files changed, 20 insertions(+), 6 deletions(-)


Index: llvm/test/Regression/CodeGen/PowerPC/and_sext.ll
diff -u llvm/test/Regression/CodeGen/PowerPC/and_sext.ll:1.1 
llvm/test/Regression/CodeGen/PowerPC/and_sext.ll:1.2
--- llvm/test/Regression/CodeGen/PowerPC/and_sext.ll:1.1Fri Feb 17 
15:22:08 2006
+++ llvm/test/Regression/CodeGen/PowerPC/and_sext.llSat May  6 13:15:50 2006
@@ -1,9 +1,23 @@
-; This test should not contain a sign extend
-; RUN: llvm-as < %s | llc -march=ppc32 | not grep extsh 
+; These tests should not contain a sign extend.
+; RUN: llvm-as < %s | llc -march=ppc32 &&
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep extsh  &&
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep extsb
 
-int %test(uint %mode.0.i.0) {
-%tmp.79 = cast uint %mode.0.i.0 to short;  [#uses=1]
-%tmp.80 = cast short %tmp.79 to int ;  [#uses=1]
-%tmp.81 = and int %tmp.80, 24 ;  [#uses=1]
+int %test1(uint %mode.0.i.0) {
+%tmp.79 = cast uint %mode.0.i.0 to short
+%tmp.80 = cast short %tmp.79 to int
+%tmp.81 = and int %tmp.80, 24
 ret int %tmp.81
 }
+
+short %test2(short %X, short %x) {
+%tmp = cast short %X to int
+%tmp1 = cast short %x to int
+%tmp2 = add int %tmp, %tmp1
+%tmp4 = shr int %tmp2, ubyte 1
+%tmp4 = cast int %tmp4 to short
+%tmp45 = cast short %tmp4 to int
+%retval = cast int %tmp45 to short
+ret short %retval
+}
+



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/CodeGen/AsmPrinter.cpp

2006-05-06 Thread Jeff Cohen


Changes in directory llvm/lib/CodeGen:

AsmPrinter.cpp updated: 1.67 -> 1.68
---
Log message:

Fix some loose ends in MASM support.

---
Diffs of the changes:  (+5 -5)

 AsmPrinter.cpp |   10 +-
 1 files changed, 5 insertions(+), 5 deletions(-)


Index: llvm/lib/CodeGen/AsmPrinter.cpp
diff -u llvm/lib/CodeGen/AsmPrinter.cpp:1.67 
llvm/lib/CodeGen/AsmPrinter.cpp:1.68
--- llvm/lib/CodeGen/AsmPrinter.cpp:1.67Fri May  5 16:47:05 2006
+++ llvm/lib/CodeGen/AsmPrinter.cpp Sat May  6 16:27:14 2006
@@ -71,9 +71,9 @@
   if (MLSections) {
 if (*NewSection == 0) {
   // Simply end the current section, if any.
-  if (CurrentSection != "") {
-O << CurrentSection << "\tends\n";
-CurrentSection = "";
+  if (!CurrentSection.empty()) {
+O << CurrentSection << "\tends\n\n";
+CurrentSection.clear();
   }
   return;
 }
@@ -88,8 +88,8 @@
   NS = "_text";
 
 if (CurrentSection != NS) {
-  if (CurrentSection != "")
-O << CurrentSection << "\tends\n";
+  if (!CurrentSection.empty())
+O << CurrentSection << "\tends\n\n";
   CurrentSection = NS;
   O << CurrentSection << (isData ? "\tsegment 'DATA'\n"
  : "\tsegment 'CODE'\n");



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/Target/X86/X86AsmPrinter.cpp X86IntelAsmPrinter.cpp

2006-05-06 Thread Jeff Cohen


Changes in directory llvm/lib/Target/X86:

X86AsmPrinter.cpp updated: 1.178 -> 1.179
X86IntelAsmPrinter.cpp updated: 1.42 -> 1.43
---
Log message:

Fix some loose ends in MASM support.

---
Diffs of the changes:  (+72 -60)

 X86AsmPrinter.cpp  |3 +
 X86IntelAsmPrinter.cpp |  129 ++---
 2 files changed, 72 insertions(+), 60 deletions(-)


Index: llvm/lib/Target/X86/X86AsmPrinter.cpp
diff -u llvm/lib/Target/X86/X86AsmPrinter.cpp:1.178 
llvm/lib/Target/X86/X86AsmPrinter.cpp:1.179
--- llvm/lib/Target/X86/X86AsmPrinter.cpp:1.178 Fri May  5 16:48:50 2006
+++ llvm/lib/Target/X86/X86AsmPrinter.cpp   Sat May  6 16:27:14 2006
@@ -90,6 +90,9 @@
 }
 
 bool X86SharedAsmPrinter::doFinalization(Module &M) {
+  // Note: this code is not shared by the Intel printer as it is too different
+  // from how MASM does things.  When making changes here don't forget to look
+  // at X86IntelAsmPrinter::doFinalization().
   const TargetData *TD = TM.getTargetData();
 
   // Print out module-level global variables here.


Index: llvm/lib/Target/X86/X86IntelAsmPrinter.cpp
diff -u llvm/lib/Target/X86/X86IntelAsmPrinter.cpp:1.42 
llvm/lib/Target/X86/X86IntelAsmPrinter.cpp:1.43
--- llvm/lib/Target/X86/X86IntelAsmPrinter.cpp:1.42 Fri May  5 00:40:20 2006
+++ llvm/lib/Target/X86/X86IntelAsmPrinter.cpp  Sat May  6 16:27:14 2006
@@ -30,12 +30,6 @@
 /// method to print assembly for each instruction.
 ///
 bool X86IntelAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
-  if (forDarwin) {
-// Let PassManager know we need debug information and relay
-// the MachineDebugInfo address on to DwarfWriter.
-DW.SetDebugInfo(&getAnalysis());
-  }
-
   SetupMachineFunction(MF);
   O << "\n\n";
 
@@ -49,11 +43,6 @@
 O << "\tpublic " << CurrentFnName << "\n";
   O << CurrentFnName << "\tproc near\n";
   
-  if (forDarwin) {
-// Emit pre-function debug information.
-DW.BeginFunction(&MF);
-  }
-
   // Print out code for the function.
   for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
I != E; ++I) {
@@ -70,11 +59,6 @@
 }
   }
 
-  if (forDarwin) {
-// Emit post-function debug information.
-DW.EndFunction();
-  }
-
   O << CurrentFnName << "\tendp\n";
 
   // We didn't modify anything.
@@ -124,8 +108,6 @@
 if (!isMemOp) O << "OFFSET ";
 O << "[" << PrivateGlobalPrefix << "CPI" << getFunctionNumber() << "_"
   << MO.getConstantPoolIndex();
-if (forDarwin && TM.getRelocationModel() == Reloc::PIC)
-  O << "-\"L" << getFunctionNumber() << "$pb\"";
 int Offset = MO.getOffset();
 if (Offset > 0)
   O << " + " << Offset;
@@ -138,29 +120,7 @@
 bool isCallOp = Modifier && !strcmp(Modifier, "call");
 bool isMemOp  = Modifier && !strcmp(Modifier, "mem");
 if (!isMemOp && !isCallOp) O << "OFFSET ";
-if (forDarwin && TM.getRelocationModel() != Reloc::Static) {
-  GlobalValue *GV = MO.getGlobal();
-  std::string Name = Mang->getValueName(GV);
-  if (!isMemOp && !isCallOp) O << '$';
-  // Link-once, External, or Weakly-linked global variables need
-  // non-lazily-resolved stubs
-  if (GV->isExternal() || GV->hasWeakLinkage() ||
-  GV->hasLinkOnceLinkage()) {
-// Dynamically-resolved functions need a stub for the function.
-if (isCallOp && isa(GV) && cast(GV)->isExternal()) 
{
-  FnStubs.insert(Name);
-  O << "L" << Name << "$stub";
-} else {
-  GVStubs.insert(Name);
-  O << "L" << Name << "$non_lazy_ptr";
-}
-  } else {
-O << Mang->getValueName(GV);
-  }
-  if (!isCallOp && TM.getRelocationModel() == Reloc::PIC)
-O << "-\"L" << getFunctionNumber() << "$pb\"";
-} else
-  O << Mang->getValueName(MO.getGlobal());
+O << Mang->getValueName(MO.getGlobal());
 int Offset = MO.getOffset();
 if (Offset > 0)
   O << " + " << Offset;
@@ -170,13 +130,6 @@
   }
   case MachineOperand::MO_ExternalSymbol: {
 bool isCallOp = Modifier && !strcmp(Modifier, "call");
-if (isCallOp && forDarwin && TM.getRelocationModel() != Reloc::Static) {
-  std::string Name(GlobalPrefix);
-  Name += MO.getSymbolName();
-  FnStubs.insert(Name);
-  O << "L" << Name << "$stub";
-  return;
-}
 if (!isCallOp) O << "OFFSET ";
 O << GlobalPrefix << MO.getSymbolName();
 return;
@@ -330,20 +283,22 @@
 }
 
 bool X86IntelAsmPrinter::doInitialization(Module &M) {
-  X86SharedAsmPrinter::doInitialization(M);
-  CommentString = ";";
+  MLSections = true;
   GlobalPrefix = "_";
+  CommentString = ";";
+
+  X86SharedAsmPrinter::doInitialization(M);
+
   PrivateGlobalPrefix = "$";
   AlignDirective = "\talign\t";
-  MLSections = true;
   ZeroDirective = "\tdb\t";
   ZeroDirectiveSuffix = " dup(0)";
   AsciiDirective = "\tdb\t";
   AscizDirective = 0;
-  Data8bitsDirective = "\t.db\t";
-  Data16bitsDirective = "\t.dw\t";
-  Data32bitsDirective = "\t.dd\t";
-  D

[llvm-commits] CVS: llvm/lib/Target/TargetLowering.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/Target:

TargetLowering.cpp updated: 1.57 -> 1.58
---
Log message:

Add some more simple sign bit propagation cases.


---
Diffs of the changes:  (+67 -27)

 TargetLowering.cpp |   94 +
 1 files changed, 67 insertions(+), 27 deletions(-)


Index: llvm/lib/Target/TargetLowering.cpp
diff -u llvm/lib/Target/TargetLowering.cpp:1.57 
llvm/lib/Target/TargetLowering.cpp:1.58
--- llvm/lib/Target/TargetLowering.cpp:1.57 Sat May  6 04:27:13 2006
+++ llvm/lib/Target/TargetLowering.cpp  Sat May  6 17:39:59 2006
@@ -1010,19 +1010,7 @@
 return 1;  // Limit search depth.
 
   switch (Op.getOpcode()) {
-  default: 
-// Allow the target to implement this method for its nodes.
-if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
-  case ISD::INTRINSIC_WO_CHAIN:
-  case ISD::INTRINSIC_W_CHAIN:
-  case ISD::INTRINSIC_VOID:
-  unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
-  if (NumBits > 1) return NumBits;
-}
-
-// FIXME: Should use computemaskedbits to look at the top bits.
-return 1;
-
+  default: break;
   case ISD::AssertSext:
 Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT());
 return VTBits-Tmp+1;
@@ -1030,6 +1018,31 @@
 Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT());
 return VTBits-Tmp;
 
+  case ISD::SEXTLOAD:// '17' bits known
+Tmp = MVT::getSizeInBits(cast(Op.getOperand(3))->getVT());
+return VTBits-Tmp+1;
+  case ISD::ZEXTLOAD:// '16' bits known
+Tmp = MVT::getSizeInBits(cast(Op.getOperand(3))->getVT());
+return VTBits-Tmp;
+
+  case ISD::Constant: {
+uint64_t Val = cast(Op)->getValue();
+// If negative, invert the bits, then look at it.
+if (Val & MVT::getIntVTSignBit(VT))
+  Val = ~Val;
+
+// Shift the bits so they are the leading bits in the int64_t.
+Val <<= 64-VTBits;
+
+// Return # leading zeros.  We use 'min' here in case Val was zero before
+// shifting.  We don't want to return '64' as for an i32 "0".
+return std::min(VTBits, CountLeadingZeros_64(Val));
+  }
+
+  case ISD::SIGN_EXTEND:
+Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType());
+return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
+
   case ISD::SIGN_EXTEND_INREG:
 // Max of the input and what this extends.
 Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT());
@@ -1046,31 +1059,58 @@
   if (Tmp > VTBits) Tmp = VTBits;
 }
 return Tmp;
-
+  case ISD::SHL:
+if (ConstantSDNode *C = dyn_cast(Op.getOperand(1))) {
+  // shl destroys sign bits.
+  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+  if (C->getValue() >= VTBits ||  // Bad shift.
+  C->getValue() >= Tmp) break;// Shifted all sign bits out.
+  return Tmp - C->getValue();
+}
+break;
   case ISD::ADD:
   case ISD::SUB:
 // Add and sub can have at most one carry bit.  Thus we know that the 
output
 // is, at worst, one more bit than the inputs.
 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
-if (Tmp == 1) return 1;
+if (Tmp == 1) return 1;  // Early out.
 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
 if (Tmp2 == 1) return 1;
 return std::min(Tmp, Tmp2)-1;
 
-  //case ISD::ZEXTLOAD:   // 16 bits known
-  //case ISD::SEXTLOAD:   // 17 bits known
-  //case ISD::Constant:
-  //case ISD::SIGN_EXTEND:
-  //
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR:// NOT is handled here.
+// Logical binary ops preserve the number of sign bits.
+Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+if (Tmp == 1) return 1;  // Early out.
+Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+return std::min(Tmp, Tmp2);
+
+  case ISD::SELECT:
+Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+if (Tmp == 1) return 1;  // Early out.
+Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+return std::min(Tmp, Tmp2);
+
+  case ISD::SETCC:
+// If setcc returns 0/-1, all bits are sign bits.
+if (getSetCCResultContents() == ZeroOrNegativeOneSetCCResult)
+  return VTBits;
+break;
   }
   
-#if 0
-  // fold (sext_in_reg (setcc x)) -> setcc x iff (setcc x) == 0 or -1
-  if (N0.getOpcode() == ISD::SETCC &&
-  TLI.getSetCCResultContents() == 
-  TargetLowering::ZeroOrNegativeOneSetCCResult)
-return N0;
-#endif
+  // Allow the target to implement this method for its nodes.
+  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
+  Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 
+  Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+  Op.getOpcode() == ISD::INTRINSIC_VOID) {
+unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
+if (NumBits > 1) return NumBits;
+  }
+  
+  // FIXME: Should use computemaskedbits to look at the top bits.
+  return 1;
 }
 
 



___
llvm-commits mailing list

[llvm-commits] CVS: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/CodeGen/SelectionDAG:

DAGCombiner.cpp updated: 1.160 -> 1.161
---
Log message:

remove cases handled elsewhere


---
Diffs of the changes:  (+2 -16)

 DAGCombiner.cpp |   18 ++
 1 files changed, 2 insertions(+), 16 deletions(-)


Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
diff -u llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.160 
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.161
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.160 Sat May  6 04:30:03 2006
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp   Sat May  6 17:43:44 2006
@@ -1938,30 +1938,16 @@
 return DAG.getNode(ISD::SIGN_EXTEND, VT, Truncate);
   }
   
-  // If the input is already sign extended, just drop the extend.
+  // If the input is already sign extended, just drop the extension.
   if (TLI.ComputeNumSignBits(N0) >= MVT::getSizeInBits(VT)-EVTBits+1)
 return N0;
   
-  // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt1
-  if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 
-  cast(N0.getOperand(1))->getVT() <= EVT) {
-return N0;
-  }
   // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
   if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
   EVT < cast(N0.getOperand(1))->getVT()) {
 return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0), N1);
   }
-  // fold (sext_in_reg (sextload x)) -> (sextload x)
-  if (N0.getOpcode() == ISD::SEXTLOAD && 
-  cast(N0.getOperand(3))->getVT() <= EVT) {
-return N0;
-  }
-  // fold (sext_in_reg (setcc x)) -> setcc x iff (setcc x) == 0 or -1
-  if (N0.getOpcode() == ISD::SETCC &&
-  TLI.getSetCCResultContents() == 
-TargetLowering::ZeroOrNegativeOneSetCCResult)
-return N0;
+  
   // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is zero
   if (TLI.MaskedValueIsZero(N0, 1ULL << (EVTBits-1)))
 return DAG.getZeroExtendInReg(N0, EVT);



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/CodeGen/SelectionDAG:

SelectionDAG.cpp updated: 1.303 -> 1.304
---
Log message:

constant fold sign_extend_inreg


---
Diffs of the changes:  (+9 -1)

 SelectionDAG.cpp |   10 +-
 1 files changed, 9 insertions(+), 1 deletion(-)


Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
diff -u llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp:1.303 
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp:1.304
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp:1.303Thu Apr 27 
00:01:07 2006
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp  Sat May  6 18:05:41 2006
@@ -1335,6 +1335,14 @@
   ConstantSDNode *N1C = dyn_cast(N1.Val);
   ConstantSDNode *N2C = dyn_cast(N2.Val);
   if (N1C) {
+if (Opcode == ISD::SIGN_EXTEND_INREG) {
+  int64_t Val = N1C->getValue();
+  unsigned FromBits = MVT::getSizeInBits(cast(N2)->getVT());
+  Val <<= 64-FromBits;
+  Val >>= 64-FromBits;
+  return getConstant(Val, VT);
+}
+
 if (N2C) {
   uint64_t C1 = N1C->getValue(), C2 = N2C->getValue();
   switch (Opcode) {
@@ -1441,7 +1449,7 @@
 }
   }
   
-  // Fold a bunch of operators that 
+  // Fold a bunch of operators when the RHS is undef. 
   if (N2.getOpcode() == ISD::UNDEF) {
 switch (Opcode) {
 case ISD::ADD:



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/CodeGen/SelectionDAG:

DAGCombiner.cpp updated: 1.161 -> 1.162
---
Log message:

Simplify some code, add a couple minor missed folds


---
Diffs of the changes:  (+16 -21)

 DAGCombiner.cpp |   37 -
 1 files changed, 16 insertions(+), 21 deletions(-)


Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
diff -u llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.161 
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.162
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:1.161 Sat May  6 17:43:44 2006
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp   Sat May  6 18:06:26 2006
@@ -1664,33 +1664,30 @@
 
 SDOperand DAGCombiner::visitCTLZ(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
 
   // fold (ctlz c1) -> c2
-  if (N0C)
+  if (isa(N0))
 return DAG.getNode(ISD::CTLZ, VT, N0);
   return SDOperand();
 }
 
 SDOperand DAGCombiner::visitCTTZ(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
   
   // fold (cttz c1) -> c2
-  if (N0C)
+  if (isa(N0))
 return DAG.getNode(ISD::CTTZ, VT, N0);
   return SDOperand();
 }
 
 SDOperand DAGCombiner::visitCTPOP(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
   
   // fold (ctpop c1) -> c2
-  if (N0C)
+  if (isa(N0))
 return DAG.getNode(ISD::CTPOP, VT, N0);
   return SDOperand();
 }
@@ -1790,21 +1787,24 @@
 
 SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
 
   // fold (sext c1) -> c1
-  if (N0C)
+  if (ConstantSDNode *N0C = dyn_cast(N0))
 return DAG.getNode(ISD::SIGN_EXTEND, VT, N0);
+  
   // fold (sext (sext x)) -> (sext x)
-  if (N0.getOpcode() == ISD::SIGN_EXTEND)
+  // fold (sext (aext x)) -> (sext x)
+  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
 return DAG.getNode(ISD::SIGN_EXTEND, VT, N0.getOperand(0));
+  
   // fold (sext (truncate x)) -> (sextinreg x) iff x size == sext size.
   if (N0.getOpcode() == ISD::TRUNCATE && N0.getOperand(0).getValueType() == 
VT&&
   (!AfterLegalize || 
TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, N0.getValueType(
 return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0),
DAG.getValueType(N0.getValueType()));
+  
   // fold (sext (load x)) -> (sext (truncate (sextload x)))
   if (N0.getOpcode() == ISD::LOAD && N0.hasOneUse() &&
   (!AfterLegalize||TLI.isOperationLegal(ISD::SEXTLOAD, 
N0.getValueType({
@@ -1835,14 +1835,14 @@
 
 SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
 
   // fold (zext c1) -> c1
-  if (N0C)
+  if (ConstantSDNode *N0C = dyn_cast(N0))
 return DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
   // fold (zext (zext x)) -> (zext x)
-  if (N0.getOpcode() == ISD::ZERO_EXTEND)
+  // fold (zext (aext x)) -> (zext x)
+  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
 return DAG.getNode(ISD::ZERO_EXTEND, VT, N0.getOperand(0));
   // fold (zext (truncate x)) -> (zextinreg x) iff x size == zext size.
   if (N0.getOpcode() == ISD::TRUNCATE && N0.getOperand(0).getValueType() == 
VT&&
@@ -1877,11 +1877,10 @@
 
 SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
   
   // fold (aext c1) -> c1
-  if (N0C)
+  if (isa(N0))
 return DAG.getNode(ISD::ANY_EXTEND, VT, N0);
   // fold (aext (aext x)) -> (aext x)
   // fold (aext (zext x)) -> (zext x)
@@ -1927,16 +1926,13 @@
 SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   SDOperand N1 = N->getOperand(1);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
   MVT::ValueType EVT = cast(N1)->getVT();
   unsigned EVTBits = MVT::getSizeInBits(EVT);
   
   // fold (sext_in_reg c1) -> c1
-  if (N0C) {
-SDOperand Truncate = DAG.getConstant(N0C->getValue(), EVT);
-return DAG.getNode(ISD::SIGN_EXTEND, VT, Truncate);
-  }
+  if (isa(N0))
+return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0, N1);
   
   // If the input is already sign extended, just drop the extension.
   if (TLI.ComputeNumSignBits(N0) >= MVT::getSizeInBits(VT)-EVTBits+1)
@@ -1978,14 +1974,13 @@
 
 SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  ConstantSDNode *N0C = dyn_cast(N0);
   MVT::ValueType VT = N->getValueType(0);
 
   // noop truncate
   if (N0.getValueType() == N->getValueType(0))
 return N0;
   // fold (truncate c1) -> c1
-  if (N0C)
+  if (isa(N0))
 return DAG.getNode(ISD::

[llvm-commits] CVS: llvm/lib/Bytecode/Archive/Archive.cpp

2006-05-06 Thread Jeff Cohen


Changes in directory llvm/lib/Bytecode/Archive:

Archive.cpp updated: 1.10 -> 1.11
---
Log message:

Apply bug fix supplied by Greg Pettyjohn for a bug he found: "" (the empty
string) is not a legal path on Windows.

---
Diffs of the changes:  (+1 -1)

 Archive.cpp |2 +-
 1 files changed, 1 insertion(+), 1 deletion(-)


Index: llvm/lib/Bytecode/Archive/Archive.cpp
diff -u llvm/lib/Bytecode/Archive/Archive.cpp:1.10 
llvm/lib/Bytecode/Archive/Archive.cpp:1.11
--- llvm/lib/Bytecode/Archive/Archive.cpp:1.10  Tue Nov 29 23:21:10 2005
+++ llvm/lib/Bytecode/Archive/Archive.cpp   Sat May  6 18:25:53 2006
@@ -39,7 +39,7 @@
 // This default constructor is only use by the ilist when it creates its
 // sentry node. We give it specific static values to make it stand out a bit.
 ArchiveMember::ArchiveMember()
-  : next(0), prev(0), parent(0), path(""), flags(0), data(0)
+  : next(0), prev(0), parent(0), path("--invalid--"), flags(0), data(0)
 {
   info.user = sys::Process::GetCurrentUserId();
   info.group = sys::Process::GetCurrentGroupId();



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/Target/TargetLowering.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/Target:

TargetLowering.cpp updated: 1.58 -> 1.59
---
Log message:

Add some more sign propagation cases


---
Diffs of the changes:  (+77 -10)

 TargetLowering.cpp |   87 ++---
 1 files changed, 77 insertions(+), 10 deletions(-)


Index: llvm/lib/Target/TargetLowering.cpp
diff -u llvm/lib/Target/TargetLowering.cpp:1.58 
llvm/lib/Target/TargetLowering.cpp:1.59
--- llvm/lib/Target/TargetLowering.cpp:1.58 Sat May  6 17:39:59 2006
+++ llvm/lib/Target/TargetLowering.cpp  Sat May  6 18:40:29 2006
@@ -1068,16 +1068,6 @@
   return Tmp - C->getValue();
 }
 break;
-  case ISD::ADD:
-  case ISD::SUB:
-// Add and sub can have at most one carry bit.  Thus we know that the 
output
-// is, at worst, one more bit than the inputs.
-Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
-if (Tmp == 1) return 1;  // Early out.
-Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
-if (Tmp2 == 1) return 1;
-return std::min(Tmp, Tmp2)-1;
-
   case ISD::AND:
   case ISD::OR:
   case ISD::XOR:// NOT is handled here.
@@ -1098,6 +1088,83 @@
 if (getSetCCResultContents() == ZeroOrNegativeOneSetCCResult)
   return VTBits;
 break;
+  case ISD::ROTL:
+  case ISD::ROTR:
+if (ConstantSDNode *C = dyn_cast(Op.getOperand(1))) {
+  unsigned RotAmt = C->getValue() & (VTBits-1);
+  
+  // Handle rotate right by N like a rotate left by 32-N.
+  if (Op.getOpcode() == ISD::ROTR)
+RotAmt = (VTBits-RotAmt) & (VTBits-1);
+
+  // If we aren't rotating out all of the known-in sign bits, return the
+  // number that are left.  This handles rotl(sext(x), 1) for example.
+  Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+  if (Tmp > RotAmt+1) return Tmp-RotAmt;
+}
+break;
+  case ISD::ADD:
+// Add can have at most one carry bit.  Thus we know that the output
+// is, at worst, one more bit than the inputs.
+Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+if (Tmp == 1) return 1;  // Early out.
+  
+// Special case decrementing a value (ADD X, -1):
+if (ConstantSDNode *CRHS = dyn_cast(Op.getOperand(0)))
+  if (CRHS->isAllOnesValue()) {
+uint64_t KnownZero, KnownOne;
+uint64_t Mask = MVT::getIntVTBitMask(VT);
+ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, 
Depth+1);
+
+// If the input is known to be 0 or 1, the output is 0/-1, which is all
+// sign bits set.
+if ((KnownZero|1) == Mask)
+  return VTBits;
+
+// If we are subtracting one from a positive number, there is no carry
+// out of the result.
+if (KnownZero & MVT::getIntVTSignBit(VT))
+  return Tmp;
+  }
+  
+Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+if (Tmp2 == 1) return 1;
+  return std::min(Tmp, Tmp2)-1;
+break;
+
+  case ISD::SUB:
+Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+if (Tmp2 == 1) return 1;
+  
+// Handle NEG.
+if (ConstantSDNode *CLHS = dyn_cast(Op.getOperand(0)))
+  if (CLHS->getValue() == 0) {
+uint64_t KnownZero, KnownOne;
+uint64_t Mask = MVT::getIntVTBitMask(VT);
+ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, 
Depth+1);
+// If the input is known to be 0 or 1, the output is 0/-1, which is all
+// sign bits set.
+if ((KnownZero|1) == Mask)
+  return VTBits;
+
+// If the input is known to be positive (the sign bit is known clear),
+// the output of the NEG has the same number of sign bits as the input.
+if (KnownZero & MVT::getIntVTSignBit(VT))
+  return Tmp2;
+
+// Otherwise, we treat this like a SUB.
+  }
+
+// Sub can have at most one carry bit.  Thus we know that the output
+// is, at worst, one more bit than the inputs.
+Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+if (Tmp == 1) return 1;  // Early out.
+  return std::min(Tmp, Tmp2)-1;
+break;
+  case ISD::TRUNCATE:
+// FIXME: it's tricky to do anything useful for this, but it is an 
important
+// case for targets like X86.
+break;
   }
   
   // Allow the target to implement this method for its nodes.



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/Target/TargetLowering.cpp

2006-05-06 Thread Chris Lattner


Changes in directory llvm/lib/Target:

TargetLowering.cpp updated: 1.59 -> 1.60
---
Log message:

Use ComputeMaskedBits to determine # sign bits as a fallback.  This allows us 
to handle all kinds of stuff, including silly things like:
sextinreg(setcc,i16) -> setcc.


---
Diffs of the changes:  (+23 -2)

 TargetLowering.cpp |   25 +++--
 1 files changed, 23 insertions(+), 2 deletions(-)


Index: llvm/lib/Target/TargetLowering.cpp
diff -u llvm/lib/Target/TargetLowering.cpp:1.59 
llvm/lib/Target/TargetLowering.cpp:1.60
--- llvm/lib/Target/TargetLowering.cpp:1.59 Sat May  6 18:40:29 2006
+++ llvm/lib/Target/TargetLowering.cpp  Sat May  6 18:48:13 2006
@@ -1176,8 +1176,29 @@
 if (NumBits > 1) return NumBits;
   }
   
-  // FIXME: Should use computemaskedbits to look at the top bits.
-  return 1;
+  // Finally, if we can prove that the top bits of the result are 0's or 1's,
+  // use this information.
+  uint64_t KnownZero, KnownOne;
+  uint64_t Mask = MVT::getIntVTBitMask(VT);
+  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
+  
+  uint64_t SignBit = MVT::getIntVTSignBit(VT);
+  if (KnownZero & SignBit) {// SignBit is 0
+Mask = KnownZero;
+  } else if (KnownOne & SignBit) {  // SignBit is 1;
+Mask = KnownOne;
+  } else {
+// Nothing known.
+return 1;
+  }
+  
+  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
+  // the number of identical bits in the top of the input value.
+  Mask ^= ~0ULL;
+  Mask <<= 64-VTBits;
+  // Return # leading zeros.  We use 'min' here in case Val was zero before
+  // shifting.  We don't want to return '64' as for an i32 "0".
+  return std::min(VTBits, CountLeadingZeros_64(Mask));
 }
 
 



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits


[llvm-commits] CVS: llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp

2006-05-06 Thread Nate Begeman


Changes in directory llvm/lib/Target/PowerPC:

PPCISelDAGToDAG.cpp updated: 1.180 -> 1.181
---
Log message:

New rlwimi implementation, which is superior to the old one. There are
still a couple missed optimizations, but we now generate all the possible
rlwimis for multiple inserts into the same bitfield.  More regression tests
to come.


---
Diffs of the changes:  (+47 -88)

 PPCISelDAGToDAG.cpp |  135 ++--
 1 files changed, 47 insertions(+), 88 deletions(-)


Index: llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
diff -u llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.180 
llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.181
--- llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.180   Sat Apr 22 13:53:45 2006
+++ llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp Sat May  6 19:23:38 2006
@@ -391,15 +391,7 @@
 
 /// SelectBitfieldInsert - turn an or of two masked values into
 /// the rotate left word immediate then mask insert (rlwimi) instruction.
-/// Returns true on success, false if the caller still needs to select OR.
-///
-/// Patterns matched:
-/// 1. or shl, and   5. or and, and
-/// 2. or and, shl   6. or shl, shr
-/// 3. or shr, and   7. or shr, shl
-/// 4. or and, shr
 SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
-  bool IsRotate = false;
   unsigned TgtMask = 0x, InsMask = 0x, SH = 0;
   unsigned Value;
   
@@ -409,90 +401,57 @@
   unsigned Op0Opc = Op0.getOpcode();
   unsigned Op1Opc = Op1.getOpcode();
   
-  // Verify that we have the correct opcodes
-  if (ISD::SHL != Op0Opc && ISD::SRL != Op0Opc && ISD::AND != Op0Opc)
-return false;
-  if (ISD::SHL != Op1Opc && ISD::SRL != Op1Opc && ISD::AND != Op1Opc)
-return false;
-  
-  // Generate Mask value for Target
-  if (isIntImmediate(Op0.getOperand(1), Value)) {
-switch(Op0Opc) {
-case ISD::SHL: TgtMask <<= Value; break;
-case ISD::SRL: TgtMask >>= Value; break;
-case ISD::AND: TgtMask &= Value; break;
+  uint64_t LKZ, LKO, RKZ, RKO;
+  TLI.ComputeMaskedBits(Op0, TgtMask, LKZ, LKO);
+  TLI.ComputeMaskedBits(Op1, TgtMask, RKZ, RKO);
+  
+  // FIXME: rotrwi / rotlwi
+  if ((LKZ | RKZ) == 0xULL) {
+unsigned PInsMask = ~RKZ;
+unsigned PTgtMask = ~LKZ;
+
+// If the LHS has a foldable shift, then swap it to the RHS so that we can
+// fold the shift into the insert.
+if (Op0Opc == ISD::AND && Op1Opc == ISD::AND) {
+  if (Op0.getOperand(0).getOpcode() == ISD::SHL ||
+  Op0.getOperand(0).getOpcode() == ISD::SRL) {
+if (Op1.getOperand(0).getOpcode() != ISD::SHL &&
+Op1.getOperand(0).getOpcode() != ISD::SRL) {
+  std::swap(Op0, Op1);
+  std::swap(Op0Opc, Op1Opc);
+  std::swap(PInsMask, PTgtMask);
+}
+  }
 }
-  } else {
-return 0;
-  }
-  
-  // Generate Mask value for Insert
-  if (!isIntImmediate(Op1.getOperand(1), Value))
-return 0;
-  
-  switch(Op1Opc) {
-  case ISD::SHL:
-SH = Value;
-InsMask <<= SH;
-if (Op0Opc == ISD::SRL) IsRotate = true;
-break;
-  case ISD::SRL:
-SH = Value;
-InsMask >>= SH;
-SH = 32-SH;
-if (Op0Opc == ISD::SHL) IsRotate = true;
-break;
-  case ISD::AND:
-InsMask &= Value;
-break;
-  }
-  
-  // If both of the inputs are ANDs and one of them has a logical shift by
-  // constant as its input, make that AND the inserted value so that we can
-  // combine the shift into the rotate part of the rlwimi instruction
-  bool IsAndWithShiftOp = false;
-  if (Op0Opc == ISD::AND && Op1Opc == ISD::AND) {
-if (Op1.getOperand(0).getOpcode() == ISD::SHL ||
-Op1.getOperand(0).getOpcode() == ISD::SRL) {
-  if (isIntImmediate(Op1.getOperand(0).getOperand(1), Value)) {
-SH = Op1.getOperand(0).getOpcode() == ISD::SHL ? Value : 32 - Value;
-IsAndWithShiftOp = true;
-  }
-} else if (Op0.getOperand(0).getOpcode() == ISD::SHL ||
-   Op0.getOperand(0).getOpcode() == ISD::SRL) {
-  if (isIntImmediate(Op0.getOperand(0).getOperand(1), Value)) {
-std::swap(Op0, Op1);
-std::swap(TgtMask, InsMask);
-SH = Op1.getOperand(0).getOpcode() == ISD::SHL ? Value : 32 - Value;
-IsAndWithShiftOp = true;
+
+unsigned MB, ME;
+if (isRunOfOnes(PInsMask, MB, ME)) {
+  SDOperand Tmp1, Tmp2, Tmp3;
+  bool DisjointMask = (PTgtMask ^ PInsMask) == 0x;
+
+  if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) &&
+  isIntImmediate(Op1.getOperand(1), Value)) {
+Op1 = Op1.getOperand(0);
+SH  = (Op1Opc == ISD::SHL) ? Value : 32 - Value;
+  }
+  if (Op1Opc == ISD::AND) {
+unsigned SHOpc = Op1.getOperand(0).getOpcode();
+if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) &&
+isIntImmediate(Op1.getOperand(0).getOperand(1), Value)) {
+  Op1 = Op1.getOperand(0).getOperand(0);
+  SH  = (SHOpc == ISD::SHL) ? Value : 32 - Value;
+} else {
+  Op1 = Op1.getOpe

[llvm-commits] CVS: llvm/lib/System/Win32/Path.inc

2006-05-06 Thread Jeff Cohen


Changes in directory llvm/lib/System/Win32:

Path.inc updated: 1.41 -> 1.42
---
Log message:

Unlike Unix, Windows won't let a file be implicitly replaced via renaming 
without explicit permission.

---
Diffs of the changes:  (+1 -1)

 Path.inc |2 +-
 1 files changed, 1 insertion(+), 1 deletion(-)


Index: llvm/lib/System/Win32/Path.inc
diff -u llvm/lib/System/Win32/Path.inc:1.41 llvm/lib/System/Win32/Path.inc:1.42
--- llvm/lib/System/Win32/Path.inc:1.41 Sat Apr 29 13:41:44 2006
+++ llvm/lib/System/Win32/Path.inc  Sat May  6 21:51:51 2006
@@ -682,7 +682,7 @@
 
 bool
 Path::renamePathOnDisk(const Path& newName) {
-  if (!MoveFile(path.c_str(), newName.c_str()))
+  if (!MoveFileEx(path.c_str(), newName.c_str(), MOVEFILE_REPLACE_EXISTING))
 ThrowError("Can't move '" + path +
"' to '" + newName.path + "': ");
   return true;



___
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits