Author: Guillaume Chatelet Date: 2023-01-11T16:07:48Z New Revision: 6916ebd026500061462917666a0e0d228ed52681
URL: https://github.com/llvm/llvm-project/commit/6916ebd026500061462917666a0e0d228ed52681 DIFF: https://github.com/llvm/llvm-project/commit/6916ebd026500061462917666a0e0d228ed52681.diff LOG: [clang][NFC] Use the TypeSize::getXXXValue() instead of TypeSize::getXXXSize() This change is one of a series to implement the discussion from https://reviews.llvm.org/D141134. Added: Modified: clang/lib/CodeGen/CGBuiltin.cpp clang/lib/CodeGen/CGCall.cpp clang/lib/CodeGen/CGExpr.cpp clang/lib/CodeGen/CGGPUBuiltin.cpp clang/lib/CodeGen/CGStmt.cpp clang/lib/CodeGen/CodeGenFunction.cpp clang/lib/CodeGen/TargetInfo.cpp Removed: ################################################################################ diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index a6994688d44e1..430b5f43cdd5a 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -6784,8 +6784,8 @@ static Value *EmitCommonNeonSISDBuiltinExpr( Value *Result = CGF.EmitNeonCall(F, Ops, s); llvm::Type *ResultType = CGF.ConvertType(E->getType()); - if (ResultType->getPrimitiveSizeInBits().getFixedSize() < - Result->getType()->getPrimitiveSizeInBits().getFixedSize()) + if (ResultType->getPrimitiveSizeInBits().getFixedValue() < + Result->getType()->getPrimitiveSizeInBits().getFixedValue()) return CGF.Builder.CreateExtractElement(Result, C0); return CGF.Builder.CreateBitCast(Result, ResultType, s); diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 06976ba5fb44a..41084956e1979 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1261,7 +1261,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, - DstSize.getFixedSize(), CGF); + DstSize.getFixedValue(), CGF); SrcTy = Src.getElementType(); } @@ -1277,7 +1277,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type 
*Ty, // If load is legal, just bitcast the src pointer. if (!SrcSize.isScalable() && !DstSize.isScalable() && - SrcSize.getFixedSize() >= DstSize.getFixedSize()) { + SrcSize.getFixedValue() >= DstSize.getFixedValue()) { // Generally SrcSize is never greater than DstSize, since this means we are // losing bits. However, this can happen in cases where the structure has // additional padding, for example due to a user specified alignment. @@ -1323,7 +1323,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, CGF.Builder.CreateMemCpy( Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), Src.getAlignment().getAsAlign(), - llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize())); + llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue())); return CGF.Builder.CreateLoad(Tmp); } @@ -1366,7 +1366,7 @@ static void CreateCoercedStore(llvm::Value *Src, if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, - SrcSize.getFixedSize(), CGF); + SrcSize.getFixedValue(), CGF); DstTy = Dst.getElementType(); } @@ -1393,7 +1393,7 @@ static void CreateCoercedStore(llvm::Value *Src, // If store is legal, just bitcast the src pointer. 
if (isa<llvm::ScalableVectorType>(SrcTy) || isa<llvm::ScalableVectorType>(DstTy) || - SrcSize.getFixedSize() <= DstSize.getFixedSize()) { + SrcSize.getFixedValue() <= DstSize.getFixedValue()) { Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); } else { @@ -1411,7 +1411,7 @@ static void CreateCoercedStore(llvm::Value *Src, CGF.Builder.CreateMemCpy( Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), - llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize())); + llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue())); } } @@ -4725,7 +4725,7 @@ class AllocAlignAttrEmitter final static unsigned getMaxVectorWidth(const llvm::Type *Ty) { if (auto *VT = dyn_cast<llvm::VectorType>(Ty)) - return VT->getPrimitiveSizeInBits().getKnownMinSize(); + return VT->getPrimitiveSizeInBits().getKnownMinValue(); if (auto *AT = dyn_cast<llvm::ArrayType>(Ty)) return getMaxVectorWidth(AT->getElementType()); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index afdee52aec629..6d5e729b1eea9 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -3089,7 +3089,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { // Floating-point types which fit into intptr_t are bitcast to integers // and then passed directly (after zero-extension, if necessary). 
if (V->getType()->isFloatingPointTy()) { - unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize(); + unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue(); if (Bits <= TargetTy->getIntegerBitWidth()) V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), Bits)); diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp index fdd2fa18bb4a0..c39e0cc75f2d3 100644 --- a/clang/lib/CodeGen/CGGPUBuiltin.cpp +++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp @@ -162,7 +162,7 @@ RValue EmitDevicePrintfCallExpr(const CallExpr *E, CodeGenFunction *CGF, // amdgpu llvm::Constant *Size = llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGM.getLLVMContext()), - static_cast<uint32_t>(r.second.getFixedSize())); + static_cast<uint32_t>(r.second.getFixedValue())); Vec.push_back(Size); } diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index f0e1d97b2676d..28203494f2b19 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -2477,7 +2477,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, - VT->getPrimitiveSizeInBits().getKnownMinSize()); + VT->getPrimitiveSizeInBits().getKnownMinValue()); } else { Address DestAddr = Dest.getAddress(*this); // Matrix types in memory are represented by arrays, but accessed through @@ -2516,7 +2516,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, - VT->getPrimitiveSizeInBits().getKnownMinSize()); + VT->getPrimitiveSizeInBits().getKnownMinValue()); // Only tie earlyclobber physregs. 
if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) InOutConstraints += llvm::utostr(i); @@ -2606,7 +2606,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, - VT->getPrimitiveSizeInBits().getKnownMinSize()); + VT->getPrimitiveSizeInBits().getKnownMinValue()); ArgTypes.push_back(Arg->getType()); ArgElemTypes.push_back(ArgElemType); diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index f15c6bbc3c156..f8617f96459da 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -481,13 +481,13 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { if (auto *VT = dyn_cast<llvm::VectorType>(A.getType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, - VT->getPrimitiveSizeInBits().getKnownMinSize()); + VT->getPrimitiveSizeInBits().getKnownMinValue()); // Update vector width based on return type. 
if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, - VT->getPrimitiveSizeInBits().getKnownMinSize()); + VT->getPrimitiveSizeInBits().getKnownMinValue()); if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth) LargestVectorWidth = CurFnInfo->getMaxVectorWidth(); diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp index b871ea6dd2baf..e07ff8a4e4ee9 100644 --- a/clang/lib/CodeGen/TargetInfo.cpp +++ b/clang/lib/CodeGen/TargetInfo.cpp @@ -1078,7 +1078,7 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, .Cases("y", "&y", "^Ym", true) .Default(false); if (IsMMXCons && Ty->isVectorTy()) { - if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() != + if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() != 64) { // Invalid MMX constraint return nullptr; @@ -2417,7 +2417,7 @@ class X86_64ABIInfo : public ABIInfo { if (info.isDirect()) { llvm::Type *ty = info.getCoerceToType(); if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) - return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128; + return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128; } return false; } _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits