gchatelet updated this revision to Diff 487339.
gchatelet added a comment.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

- Fix missing clang change


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D141134/new/

https://reviews.llvm.org/D141134

Files:
  clang/lib/CodeGen/CGDecl.cpp
  llvm/include/llvm/Support/TypeSize.h
  llvm/lib/Analysis/ConstantFolding.cpp
  llvm/lib/Analysis/Loads.cpp
  llvm/lib/CodeGen/Analysis.cpp
  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
  llvm/lib/CodeGen/StackProtector.cpp
  llvm/lib/IR/DataLayout.cpp
  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  llvm/lib/Target/AArch64/AArch64ISelLowering.h
  llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
  llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
  llvm/lib/Target/RISCV/RISCVISelLowering.cpp
  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
  llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
  llvm/utils/TableGen/CodeGenDAGPatterns.cpp

Index: llvm/utils/TableGen/CodeGenDAGPatterns.cpp
===================================================================
--- llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -764,8 +764,8 @@
 namespace {
 struct TypeSizeComparator {
   bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
-    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
-           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
+    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinSize()) <
+           std::make_tuple(RHS.isScalable(), RHS.getKnownMinSize());
   }
 };
 } // end anonymous namespace
Index: llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
===================================================================
--- llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -543,7 +543,7 @@
       if (!isAligned(I->getAlign(), Off))
         return false;
 
-      NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
+      NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedSize());
       NeededAlign = std::max(NeededAlign, I->getAlign());
     }
 
Index: llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -990,7 +990,7 @@
 unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
   if (isa<ScalableVectorType>(Ty)) {
     const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
-    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
+    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinSize();
     const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
     return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
   }
@@ -1455,7 +1455,7 @@
   TypeSize Size = DL.getTypeSizeInBits(Ty);
   if (Ty->isVectorTy()) {
     if (Size.isScalable() && ST->hasVInstructions())
-      return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
+      return divideCeil(Size.getKnownMinSize(), RISCV::RVVBitsPerBlock);
 
     if (ST->useRVVForFixedLengthVectors())
       return divideCeil(Size, ST->getRealMinVLen());
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -138,7 +138,7 @@
       if (VT.getVectorMinNumElements() < MinElts)
         return;
 
-      unsigned Size = VT.getSizeInBits().getKnownMinValue();
+      unsigned Size = VT.getSizeInBits().getKnownMinSize();
       const TargetRegisterClass *RC;
       if (Size <= RISCV::RVVBitsPerBlock)
         RC = &RISCV::VRRegClass;
@@ -1589,7 +1589,7 @@
 
 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   assert(VT.isScalableVector() && "Expecting a scalable vector type");
-  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
+  unsigned KnownSize = VT.getSizeInBits().getKnownMinSize();
   if (VT.getVectorElementType() == MVT::i1)
     KnownSize *= 8;
 
@@ -5443,7 +5443,7 @@
     // Optimize for constant AVL
     if (isa<ConstantSDNode>(AVL)) {
       unsigned EltSize = VT.getScalarSizeInBits();
-      unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
+      unsigned MinSize = VT.getSizeInBits().getKnownMinSize();
 
       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
       unsigned MaxVLMAX =
@@ -6419,7 +6419,7 @@
     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
   }
   unsigned EltSize = VecVT.getScalarSizeInBits();
-  unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
+  unsigned MinSize = VecVT.getSizeInBits().getKnownMinSize();
   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
   unsigned MaxVLMAX =
     RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
Index: llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
===================================================================
--- llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1816,9 +1816,9 @@
   auto *NcTy = const_cast<Type *>(Ty);
   switch (Kind) {
   case Store:
-    return DL.getTypeStoreSize(NcTy).getFixedValue();
+    return DL.getTypeStoreSize(NcTy).getFixedSize();
   case Alloc:
-    return DL.getTypeAllocSize(NcTy).getFixedValue();
+    return DL.getTypeAllocSize(NcTy).getFixedSize();
   }
   llvm_unreachable("Unhandled SizeKind enum");
 }
Index: llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -650,7 +650,7 @@
         continue;
       }
       CandidateTy Candidate(GV, K.second.size(),
-                      DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
+                      DL.getTypeAllocSize(GV->getValueType()).getFixedSize());
       if (MostUsed < Candidate)
         MostUsed = Candidate;
     }
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -791,7 +791,7 @@
 
     TypeSize TS = VT.getSizeInBits();
     // TODO: We should be able to use bic/bif too for SVE.
-    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
+    return !TS.isScalable() && TS.getFixedSize() >= 64; // vector 'bic'
   }
 
   bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10502,7 +10502,7 @@
     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
 
-    if (SrcVTSize.getFixedValue() < VTSize) {
+    if (SrcVTSize.getFixedSize() < VTSize) {
       assert(2 * SrcVTSize == VTSize);
       // We can pad out the smaller vector for free, so if it's part of a
       // shuffle...
@@ -10512,7 +10512,7 @@
       continue;
     }
 
-    if (SrcVTSize.getFixedValue() != 2 * VTSize) {
+    if (SrcVTSize.getFixedSize() != 2 * VTSize) {
       LLVM_DEBUG(
           dbgs() << "Reshuffle failed: result vector too small to extract\n");
       return SDValue();
Index: llvm/lib/IR/DataLayout.cpp
===================================================================
--- llvm/lib/IR/DataLayout.cpp
+++ llvm/lib/IR/DataLayout.cpp
@@ -67,7 +67,7 @@
 
     getMemberOffsets()[i] = StructSize;
     // Consume space for this data item
-    StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
+    StructSize += DL.getTypeAllocSize(Ty).getFixedSize();
   }
 
   // Add padding to the end of the struct so that it could be put in an array
Index: llvm/lib/CodeGen/StackProtector.cpp
===================================================================
--- llvm/lib/CodeGen/StackProtector.cpp
+++ llvm/lib/CodeGen/StackProtector.cpp
@@ -218,7 +218,7 @@
       // We can't subtract a fixed size from a scalable one, so in that case
       // assume the scalable value is of minimum size.
       TypeSize NewAllocSize =
-          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
+          TypeSize::Fixed(AllocSize.getKnownMinSize()) - OffsetSize;
       if (HasAddressTaken(I, NewAllocSize))
         return true;
       break;
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4062,11 +4062,11 @@
     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
                             DAG.getVScale(dl, IntPtr,
                                           APInt(IntPtr.getScalarSizeInBits(),
-                                                TySize.getKnownMinValue())));
+                                                TySize.getKnownMinSize())));
   else
     AllocSize =
         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
-                    DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
+                    DAG.getConstant(TySize.getFixedSize(), dl, IntPtr));
 
   // Handle alignment.  If the requested alignment is less than or equal to
   // the stack alignment, ignore it.  If the size is greater than or equal to
Index: llvm/lib/CodeGen/Analysis.cpp
===================================================================
--- llvm/lib/CodeGen/Analysis.cpp
+++ llvm/lib/CodeGen/Analysis.cpp
@@ -101,7 +101,7 @@
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -146,7 +146,7 @@
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                        StartingOffset + i * EltSize);
Index: llvm/lib/Analysis/Loads.cpp
===================================================================
--- llvm/lib/Analysis/Loads.cpp
+++ llvm/lib/Analysis/Loads.cpp
@@ -408,7 +408,7 @@
   TypeSize TySize = DL.getTypeStoreSize(Ty);
   if (TySize.isScalable())
     return false;
-  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
+  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedSize());
   return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                      TLI);
 }
Index: llvm/lib/Analysis/ConstantFolding.cpp
===================================================================
--- llvm/lib/Analysis/ConstantFolding.cpp
+++ llvm/lib/Analysis/ConstantFolding.cpp
@@ -599,7 +599,7 @@
     return nullptr;
 
   // If we're not accessing anything in this constant, the result is undefined.
-  if (Offset >= (int64_t)InitializerSize.getFixedValue())
+  if (Offset >= (int64_t)InitializerSize.getFixedSize())
     return PoisonValue::get(IntType);
 
   unsigned char RawBytes[32] = {0};
Index: llvm/include/llvm/Support/TypeSize.h
===================================================================
--- llvm/include/llvm/Support/TypeSize.h
+++ llvm/include/llvm/Support/TypeSize.h
@@ -311,9 +311,16 @@
 // the exact size. If the type is a scalable vector, it will represent the known
 // minimum size.
 class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
+  using UP = details::FixedOrScalableQuantity<TypeSize, uint64_t>;
+
   TypeSize(const FixedOrScalableQuantity<TypeSize, uint64_t> &V)
       : FixedOrScalableQuantity(V) {}
 
+  // Make 'getFixedValue' private; it is exposed as 'getFixedSize' below.
+  using UP::getFixedValue;
+  // Make 'getKnownMinValue' private; it is exposed as 'getKnownMinSize' below.
+  using UP::getKnownMinValue;
+
 public:
   constexpr TypeSize(ScalarTy Quantity, bool Scalable)
       : FixedOrScalableQuantity(Quantity, Scalable) {}
@@ -399,7 +406,7 @@
 /// Similar to the alignTo functions in MathExtras.h
 inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
   assert(Align != 0u && "Align must be non-zero");
-  return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
+  return {(Size.getKnownMinSize() + Align - 1) / Align * Align,
           Size.isScalable()};
 }
 
Index: clang/lib/CodeGen/CGDecl.cpp
===================================================================
--- clang/lib/CodeGen/CGDecl.cpp
+++ clang/lib/CodeGen/CGDecl.cpp
@@ -1341,7 +1341,7 @@
              CGM.getDataLayout().getAllocaAddrSpace() &&
          "Pointer should be in alloca address space");
   llvm::Value *SizeV = llvm::ConstantInt::get(
-      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
+      Int64Ty, Size.isScalable() ? -1 : Size.getFixedSize());
   Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
   llvm::CallInst *C =
       Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
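
For reference, here is a minimal sketch (not part of the patch) of what a call site looks like once this lands: fixed-size types go through getFixedSize(), scalable types through getKnownMinSize(), and isScalable() picks between them. The helper name lowerBoundInBytes is made up purely for illustration.

  // Illustration only -- not part of this patch. Assumes the post-patch
  // spelling of the accessors (getFixedSize/getKnownMinSize); the helper
  // name lowerBoundInBytes is hypothetical.
  #include "llvm/Support/TypeSize.h"
  #include <cstdint>

  static uint64_t lowerBoundInBytes(llvm::TypeSize Bits) {
    // Scalable vectors only expose a known minimum size at compile time;
    // fixed-size types report their exact size.
    uint64_t MinBits =
        Bits.isScalable() ? Bits.getKnownMinSize() : Bits.getFixedSize();
    return MinBits / 8;
  }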