foad created this revision. foad added reviewers: lattner, RKSimon, lebedev.ri, spatel. Herald added a reviewer: bollu. Herald added projects: clang, LLVM, All. Herald added subscribers: llvm-commits, cfe-commits, and others. foad requested review of this revision.
Most clients only used these methods because they wanted to be able to extend or truncate to the same bit width, which is a no-op. Now that the standard zext, sext and trunc allow this, there is no reason to use the OrSelf versions. The OrSelf versions additionally had the strange behaviour of allowing extension to a *smaller* width, or truncation to a *larger* width, both of which were also treated as no-ops. A small amount of client code relied on this (ConstantRange::castOp and MicrosoftCXXNameMangler::mangleNumber) and needed rewriting.

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D125557

Files:
  clang/lib/AST/ExprConstant.cpp
  clang/lib/AST/MicrosoftMangle.cpp
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/lib/Sema/SemaDecl.cpp
  clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
  llvm/lib/Analysis/BasicAliasAnalysis.cpp
  llvm/lib/Analysis/ConstantFolding.cpp
  llvm/lib/Analysis/LazyValueInfo.cpp
  llvm/lib/Analysis/MemoryBuiltins.cpp
  llvm/lib/Analysis/ScalarEvolution.cpp
  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
  llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
  llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
  llvm/lib/IR/ConstantRange.cpp
  llvm/lib/Support/APFixedPoint.cpp
  llvm/lib/Support/APInt.cpp
  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
  llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
  llvm/lib/Target/RISCV/RISCVISelLowering.cpp
  llvm/lib/Target/X86/X86ISelLowering.cpp
  llvm/lib/Target/X86/X86TargetTransformInfo.cpp
  llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
  llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
  llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
  llvm/test/TableGen/VarLenEncoder.td
  llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
  polly/lib/CodeGen/IslExprBuilder.cpp
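Before the per-file diffs, a minimal illustrative sketch of the behavioural change (not part of the patch; the helper name widenToAtLeast is invented here for illustration). After this change, zext/sext/trunc to the *same* width are accepted as no-ops, but extending to a smaller width or truncating to a larger width asserts, where the OrSelf variants silently returned the value unchanged. Callers that genuinely wanted "or self" semantics, such as ConstantRange::castOp, now spell the guard out explicitly:

  #include "llvm/ADT/APInt.h"

  // Hypothetical helper showing the rewrite pattern: only widen when the
  // target width is strictly larger; otherwise keep the value as-is,
  // which is what zextOrSelf used to do silently.
  static llvm::APInt widenToAtLeast(const llvm::APInt &V, unsigned Width) {
    if (Width > V.getBitWidth())
      return V.zext(Width); // Genuine widening (same-width zext is a no-op too).
    return V; // Same or narrower target width: preserve the original value.
  }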
Index: polly/lib/CodeGen/IslExprBuilder.cpp
===================================================================
--- polly/lib/CodeGen/IslExprBuilder.cpp
+++ polly/lib/CodeGen/IslExprBuilder.cpp
@@ -765,7 +765,7 @@
   else
     T = Builder.getIntNTy(BitWidth);

-  APValue = APValue.sextOrSelf(T->getBitWidth());
+  APValue = APValue.sext(T->getBitWidth());
   V = ConstantInt::get(T, APValue);

   isl_ast_expr_free(Expr);
Index: llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
===================================================================
--- llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
+++ llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
@@ -424,7 +424,7 @@
   raw_string_ostream SS(Case);
   // Resize the scratch buffer.
   if (BitWidth && !VLI.isFixedValueOnly())
-    SS.indent(6) << "Scratch = Scratch.zextOrSelf(" << BitWidth << ");\n";
+    SS.indent(6) << "Scratch = Scratch.zext(" << BitWidth << ");\n";

   // Populate based value.
   SS.indent(6) << "Inst = getInstBits(opcode);\n";
Index: llvm/test/TableGen/VarLenEncoder.td
===================================================================
--- llvm/test/TableGen/VarLenEncoder.td
+++ llvm/test/TableGen/VarLenEncoder.td
@@ -65,7 +65,7 @@
 // CHECK: UINT64_C(46848), // FOO32

 // CHECK-LABEL: case ::FOO16: {
-// CHECK: Scratch = Scratch.zextOrSelf(41);
+// CHECK: Scratch = Scratch.zext(41);
 // src.reg
 // CHECK: getMachineOpValue(MI, MI.getOperand(1), /*Pos=*/0, Scratch, Fixups, STI);
 // CHECK: Inst.insertBits(Scratch.extractBits(8, 0), 0);
@@ -83,7 +83,7 @@
 // CHECK: Inst.insertBits(Scratch.extractBits(2, 0), 39);

 // CHECK-LABEL: case ::FOO32: {
-// CHECK: Scratch = Scratch.zextOrSelf(57);
+// CHECK: Scratch = Scratch.zext(57);
 // src.reg
 // CHECK: getMachineOpValue(MI, MI.getOperand(1), /*Pos=*/0, Scratch, Fixups, STI);
 // CHECK: Inst.insertBits(Scratch.extractBits(8, 0), 0);
Index: llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -496,7 +496,7 @@
   if (PtrDelta.urem(Stride) != 0)
     return false;
   unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
-  APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);
+  APInt IdxDiff = PtrDelta.udiv(Stride).zext(IdxBitWidth);

   // Only look through a ZExt/SExt.
   if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
Index: llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -741,8 +741,7 @@
   // sdiv/srem is UB if divisor is -1 and divident is INT_MIN, so unless we can
   // prove that such a combination is impossible, we need to bump the bitwidth.
   if (CRs[1]->contains(APInt::getAllOnes(OrigWidth)) &&
-      CRs[0]->contains(
-          APInt::getSignedMinValue(MinSignedBits).sextOrSelf(OrigWidth)))
+      CRs[0]->contains(APInt::getSignedMinValue(MinSignedBits).sext(OrigWidth)))
     ++MinSignedBits;

   // Don't shrink below 8 bits wide.
Index: llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -772,7 +772,7 @@
       uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
       // Make sure that, even if the multiplication below would wrap as an
       // uint64_t, we still do the right thing.
-      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
+      if ((CS->getValue().zext(128) * APInt(128, TypeSize)).ugt(MaxSize))
         return false;
       continue;
     }
Index: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -3813,7 +3813,7 @@
     assert(CostValue >= 0 && "Negative cost!");
     unsigned Num128Lanes = SizeInBits / 128 * CostValue;
     unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
-    APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
+    APInt WidenedDemandedElts = DemandedElts.zext(NumElts);
     unsigned Scale = NumElts / Num128Lanes;
     // We iterate each 128-lane, and check if we need a
    // extracti128/inserti128 for this 128-lane.
@@ -3973,8 +3973,7 @@
     // if all elements that will form a single Dst vector aren't demanded,
     // then we won't need to do that shuffle, so adjust the cost accordingly.
     APInt DemandedDstVectors = APIntOps::ScaleBitMask(
-        DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
-        NumDstVectors);
+        DemandedDstElts.zext(NumDstVectors * NumEltsPerDstVec), NumDstVectors);
     unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();

     InstructionCost SingleShuffleCost =
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -22491,11 +22491,11 @@
   // floating-point values.
   APInt MinInt, MaxInt;
   if (IsSigned) {
-    MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
-    MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
+    MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
+    MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
   } else {
-    MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
-    MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
+    MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
+    MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
   }

   APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
@@ -41438,7 +41438,7 @@
                              TLO, Depth + 1))
       return true;

-    Known.Zero = KnownZero.zextOrSelf(BitWidth);
+    Known.Zero = KnownZero.zext(BitWidth);
     Known.Zero.setHighBits(BitWidth - NumElts);

     // MOVMSK only uses the MSB from each vector element.
@@ -43383,8 +43383,8 @@
       uint64_t Idx = CIdx->getZExtValue();
       if (UndefVecElts[Idx])
         return IsPextr ? DAG.getConstant(0, dl, VT)
                        : DAG.getUNDEF(VT);
-      return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
-                             dl, VT);
+      return DAG.getConstant(EltBits[Idx].zext(VT.getScalarSizeInBits()), dl,
+                             VT);
     }
   }
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8626,7 +8626,7 @@
       break;
     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
-    APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
+    APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
     if (Op0.getOpcode() == ISD::FNEG)
       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
                          DAG.getConstant(SignBit, DL, VT));
Index: llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
===================================================================
--- llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -1217,8 +1217,8 @@
     unsigned W2 = A2.getBitWidth();
     unsigned MaxW = (W1 >= W2) ? W1 : W2;
     if (Cmp & Comparison::U) {
-      const APInt Zx1 = A1.zextOrSelf(MaxW);
-      const APInt Zx2 = A2.zextOrSelf(MaxW);
+      const APInt Zx1 = A1.zext(MaxW);
+      const APInt Zx2 = A2.zext(MaxW);
       if (Cmp & Comparison::L)
         Result = Zx1.ult(Zx2);
       else if (Cmp & Comparison::G)
@@ -1227,8 +1227,8 @@
     }

     // Signed comparison.
-    const APInt Sx1 = A1.sextOrSelf(MaxW);
-    const APInt Sx2 = A2.sextOrSelf(MaxW);
+    const APInt Sx1 = A1.sext(MaxW);
+    const APInt Sx2 = A2.sext(MaxW);
     if (Cmp & Comparison::L)
       Result = Sx1.slt(Sx2);
     else if (Cmp & Comparison::G)
@@ -1813,7 +1813,7 @@
       unsigned Count, APInt &Result) {
   assert(Count > 0);
   unsigned BW = A1.getBitWidth(), SW = Count*Bits;
-  APInt LoBits = (Bits < BW) ? A1.trunc(Bits) : A1.zextOrSelf(Bits);
+  APInt LoBits = (Bits < BW) ? A1.trunc(Bits) : A1.zext(Bits);
   if (Count > 1)
     LoBits = LoBits.zext(SW);

@@ -2538,9 +2538,9 @@
   }

   for (unsigned i = 0; i < HiVs.size(); ++i) {
-    APInt HV = HiVs[i].zextOrSelf(64) << 32;
+    APInt HV = HiVs[i].zext(64) << 32;
     for (unsigned j = 0; j < LoVs.size(); ++j) {
-      APInt LV = LoVs[j].zextOrSelf(64);
+      APInt LV = LoVs[j].zext(64);
       const Constant *C = intToConst(HV | LV);
       Result.add(C);
       if (Result.isBottom())
Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2502,7 +2502,7 @@

   // Try to avoid emitting a bit operation when we only need to touch half of
   // the 64-bit pointer.
-  APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
+  APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -135,7 +135,7 @@
   if (!Const)
     return false;

-  const APInt ConstValue = Const->Value.sextOrSelf(Ty.getSizeInBits());
+  const APInt ConstValue = Const->Value.sext(Ty.getSizeInBits());
   // The following code is ported from AArch64ISelLowering.
   // Multiplication of a power of two plus/minus one can be done more
   // cheaply as as shift+add/sub. For now, this is true unilaterally. If
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3593,17 +3593,14 @@
   SDValue Sat;
   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
     SDValue MinC = DAG.getConstant(
-        APInt::getSignedMaxValue(SatWidth).sextOrSelf(SrcElementWidth), DL,
-        IntVT);
+        APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
     SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
     SDValue MaxC = DAG.getConstant(
-        APInt::getSignedMinValue(SatWidth).sextOrSelf(SrcElementWidth), DL,
-        IntVT);
+        APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
     Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
   } else {
     SDValue MinC = DAG.getConstant(
-        APInt::getAllOnesValue(SatWidth).zextOrSelf(SrcElementWidth), DL,
-        IntVT);
+        APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
     Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
   }

@@ -3652,14 +3649,14 @@
   SDValue Sat;
   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
     SDValue MinC = DAG.getConstant(
-        APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth), DL, DstVT);
+        APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
     SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
     SDValue MaxC = DAG.getConstant(
-        APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth), DL, DstVT);
+        APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
     Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
   } else {
     SDValue MinC = DAG.getConstant(
-        APInt::getAllOnesValue(SatWidth).zextOrSelf(DstWidth), DL, DstVT);
+        APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
     Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
   }

@@ -12040,8 +12037,8 @@

   SDLoc DL(Op);
   APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
-  return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sextOrSelf(64)),
-                            DL, VT);
+  return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
+                            VT);
 }

 /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -3148,7 +3148,7 @@
   SDLoc DL(N);
   uint64_t Val = cast<ConstantSDNode>(N)
                      ->getAPIntValue()
-                     .truncOrSelf(VT.getFixedSizeInBits())
+                     .trunc(VT.getFixedSizeInBits())
                      .getZExtValue();

   switch (VT.SimpleTy) {
@@ -3188,7 +3188,7 @@
   SDLoc DL(N);
   int64_t Val = cast<ConstantSDNode>(N)
                     ->getAPIntValue()
-                    .truncOrSelf(VT.getFixedSizeInBits())
+                    .trunc(VT.getFixedSizeInBits())
                     .getSExtValue();

   switch (VT.SimpleTy) {
Index: llvm/lib/Support/APInt.cpp
===================================================================
--- llvm/lib/Support/APInt.cpp
+++ llvm/lib/Support/APInt.cpp
@@ -343,7 +343,7 @@
 /// In the slow case, we know the result is large.
 APInt APInt::concatSlowCase(const APInt &NewLSB) const {
   unsigned NewWidth = getBitWidth() + NewLSB.getBitWidth();
-  APInt Result = NewLSB.zextOrSelf(NewWidth);
+  APInt Result = NewLSB.zext(NewWidth);
   Result.insertBits(*this, NewLSB.getBitWidth());
   return Result;
 }
@@ -612,7 +612,7 @@
 APInt APInt::getSplat(unsigned NewLen, const APInt &V) {
   assert(NewLen >= V.getBitWidth() && "Can't splat to smaller bit width!");

-  APInt Val = V.zextOrSelf(NewLen);
+  APInt Val = V.zext(NewLen);
   for (unsigned I = V.getBitWidth(); I < NewLen; I <<= 1)
     Val |= Val << I;
Index: llvm/lib/Support/APFixedPoint.cpp
===================================================================
--- llvm/lib/Support/APFixedPoint.cpp
+++ llvm/lib/Support/APFixedPoint.cpp
@@ -233,11 +233,11 @@
   // Widen the LHS and RHS so we can perform a full multiplication.
   unsigned Wide = CommonFXSema.getWidth() * 2;
   if (CommonFXSema.isSigned()) {
-    ThisVal = ThisVal.sextOrSelf(Wide);
-    OtherVal = OtherVal.sextOrSelf(Wide);
+    ThisVal = ThisVal.sext(Wide);
+    OtherVal = OtherVal.sext(Wide);
   } else {
-    ThisVal = ThisVal.zextOrSelf(Wide);
-    OtherVal = OtherVal.zextOrSelf(Wide);
+    ThisVal = ThisVal.zext(Wide);
+    OtherVal = OtherVal.zext(Wide);
   }

   // Perform the full multiplication and downscale to get the same scale.
@@ -290,11 +290,11 @@
   // Widen the LHS and RHS so we can perform a full division.
   unsigned Wide = CommonFXSema.getWidth() * 2;
   if (CommonFXSema.isSigned()) {
-    ThisVal = ThisVal.sextOrSelf(Wide);
-    OtherVal = OtherVal.sextOrSelf(Wide);
+    ThisVal = ThisVal.sext(Wide);
+    OtherVal = OtherVal.sext(Wide);
   } else {
-    ThisVal = ThisVal.zextOrSelf(Wide);
-    OtherVal = OtherVal.zextOrSelf(Wide);
+    ThisVal = ThisVal.zext(Wide);
+    OtherVal = OtherVal.zext(Wide);
   }

   // Upscale to compensate for the loss of precision from division, and
@@ -340,9 +340,9 @@
   // Widen the LHS.
   unsigned Wide = Sema.getWidth() * 2;
   if (Sema.isSigned())
-    ThisVal = ThisVal.sextOrSelf(Wide);
+    ThisVal = ThisVal.sext(Wide);
   else
-    ThisVal = ThisVal.zextOrSelf(Wide);
+    ThisVal = ThisVal.zext(Wide);

   // Clamp the shift amount at the original width, and perform the shift.
   Amt = std::min(Amt, ThisVal.getBitWidth());
Index: llvm/lib/IR/ConstantRange.cpp
===================================================================
--- llvm/lib/IR/ConstantRange.cpp
+++ llvm/lib/IR/ConstantRange.cpp
@@ -721,15 +721,23 @@
   case Instruction::UIToFP: {
     // TODO: use input range if available
     auto BW = getBitWidth();
-    APInt Min = APInt::getMinValue(BW).zextOrSelf(ResultBitWidth);
-    APInt Max = APInt::getMaxValue(BW).zextOrSelf(ResultBitWidth);
+    APInt Min = APInt::getMinValue(BW);
+    APInt Max = APInt::getMaxValue(BW);
+    if (ResultBitWidth > BW) {
+      Min = Min.zext(ResultBitWidth);
+      Max = Max.zext(ResultBitWidth);
+    }
     return ConstantRange(std::move(Min), std::move(Max));
   }
   case Instruction::SIToFP: {
     // TODO: use input range if available
     auto BW = getBitWidth();
-    APInt SMin = APInt::getSignedMinValue(BW).sextOrSelf(ResultBitWidth);
-    APInt SMax = APInt::getSignedMaxValue(BW).sextOrSelf(ResultBitWidth);
+    APInt SMin = APInt::getSignedMinValue(BW);
+    APInt SMax = APInt::getSignedMaxValue(BW);
+    if (ResultBitWidth > BW) {
+      SMin = SMin.sext(ResultBitWidth);
+      SMax = SMax.sext(ResultBitWidth);
+    }
     return ConstantRange(std::move(SMin), std::move(SMax));
   }
   case Instruction::FPTrunc:
Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1118,7 +1118,7 @@
     KnownBits SrcKnown;
     SDValue Src = Op.getOperand(0);
     unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
-    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
+    APInt SrcDemandedBits = DemandedBits.zext(SrcBitWidth);
     if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
       return true;

@@ -1234,7 +1234,7 @@
       break;
     uint64_t Idx = Op.getConstantOperandVal(1);
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);

     if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                              Depth + 1))
@@ -2104,7 +2104,7 @@
     }

     APInt InDemandedBits = DemandedBits.trunc(InBits);
-    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
+    APInt InDemandedElts = DemandedElts.zext(InElts);
     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                              Depth + 1))
       return true;
@@ -2141,7 +2141,7 @@
     }

     APInt InDemandedBits = DemandedBits.trunc(InBits);
-    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
+    APInt InDemandedElts = DemandedElts.zext(InElts);

     // Since some of the sign extended bits are demanded, we know that the sign
     // bit is demanded.
@@ -2185,7 +2185,7 @@
       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

     APInt InDemandedBits = DemandedBits.trunc(InBits);
-    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
+    APInt InDemandedElts = DemandedElts.zext(InElts);
     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                              Depth + 1))
       return true;
@@ -2914,7 +2914,7 @@
       break;
     uint64_t Idx = Op.getConstantOperandVal(1);
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);

     APInt SrcUndef, SrcZero;
     if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
@@ -3073,7 +3073,7 @@
     APInt SrcUndef, SrcZero;
     SDValue Src = Op.getOperand(0);
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
     if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                                    Depth + 1))
       return true;
@@ -9347,11 +9347,11 @@
   // floating-point values.
   APInt MinInt, MaxInt;
   if (IsSigned) {
-    MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
-    MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
+    MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
+    MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
   } else {
-    MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
-    MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
+    MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
+    MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
   }

   // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -141,11 +141,11 @@
     unsigned EltSize =
         N->getValueType(0).getVectorElementType().getSizeInBits();
     if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
-      SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
+      SplatVal = Op0->getAPIntValue().trunc(EltSize);
       return true;
     }
     if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
-      SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
+      SplatVal = Op0->getValueAPF().bitcastToAPInt().trunc(EltSize);
       return true;
     }
   }
@@ -2669,7 +2669,7 @@
     uint64_t Idx = V.getConstantOperandVal(1);
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
     APInt UndefSrcElts;
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
       UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
       return true;
@@ -2686,9 +2686,9 @@
       return false;
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
     APInt UndefSrcElts;
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
-      UndefElts = UndefSrcElts.truncOrSelf(NumElts);
+      UndefElts = UndefSrcElts.trunc(NumElts);
       return true;
     }
     break;
@@ -3066,7 +3066,7 @@
       break;
     uint64_t Idx = Op.getConstantOperandVal(1);
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
     Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
     break;
   }
@@ -3429,7 +3429,7 @@
   }
   case ISD::ZERO_EXTEND_VECTOR_INREG: {
     EVT InVT = Op.getOperand(0).getValueType();
-    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+    APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
     Known = Known.zext(BitWidth);
     break;
@@ -3441,7 +3441,7 @@
   }
   case ISD::SIGN_EXTEND_VECTOR_INREG: {
     EVT InVT = Op.getOperand(0).getValueType();
-    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+    APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
     // If the sign bit is known to be zero or one, then sext will extend
     // it to the top bits, else it will just zext.
@@ -3457,7 +3457,7 @@
   }
   case ISD::ANY_EXTEND_VECTOR_INREG: {
     EVT InVT = Op.getOperand(0).getValueType();
-    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+    APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
     Known = Known.anyext(BitWidth);
     break;
@@ -4004,7 +4004,7 @@
   case ISD::SIGN_EXTEND_VECTOR_INREG: {
     SDValue Src = Op.getOperand(0);
     EVT SrcVT = Src.getValueType();
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
+    APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
     Tmp = VTBits - SrcVT.getScalarSizeInBits();
     return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
   }
@@ -4291,7 +4291,7 @@
       break;
     uint64_t Idx = Op.getConstantOperandVal(1);
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
-    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
     return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
   }
   case ISD::CONCAT_VECTORS: {
@@ -5569,7 +5569,7 @@
     for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
       if (DstUndefs[I])
         continue;
-      Ops[I] = getConstant(DstBits[I].sextOrSelf(BVEltBits), DL, BVEltVT);
+      Ops[I] = getConstant(DstBits[I].sext(BVEltBits), DL, BVEltVT);
     }
     return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
   }
@@ -11443,9 +11443,8 @@
       auto *CInt = dyn_cast<ConstantSDNode>(Op);
       auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
       assert((CInt || CFP) && "Unknown constant");
-      SrcBitElements[I] =
-          CInt ? CInt->getAPIntValue().truncOrSelf(SrcEltSizeInBits)
-               : CFP->getValueAPF().bitcastToAPInt();
+      SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
+                               : CFP->getValueAPF().bitcastToAPInt();
     }

     // Recast to dst width.
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1525,7 +1525,7 @@
   EVT VT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));

   APInt MulImm = cast<ConstantSDNode>(N->getOperand(0))->getAPIntValue();
-  return DAG.getVScale(SDLoc(N), VT, MulImm.sextOrSelf(VT.getSizeInBits()));
+  return DAG.getVScale(SDLoc(N), VT, MulImm.sext(VT.getSizeInBits()));
 }

 SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
Index: llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -466,9 +466,9 @@
   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     APInt Val;
     if (TLI->signExtendConstant(CI))
-      Val = CI->getValue().sextOrSelf(BitWidth);
+      Val = CI->getValue().sext(BitWidth);
     else
-      Val = CI->getValue().zextOrSelf(BitWidth);
+      Val = CI->getValue().zext(BitWidth);
     DestLOI.NumSignBits = Val.getNumSignBits();
     DestLOI.Known = KnownBits::makeConstant(Val);
   } else {
@@ -502,9 +502,9 @@
   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     APInt Val;
     if (TLI->signExtendConstant(CI))
-      Val = CI->getValue().sextOrSelf(BitWidth);
+      Val = CI->getValue().sext(BitWidth);
     else
-      Val = CI->getValue().zextOrSelf(BitWidth);
+      Val = CI->getValue().zext(BitWidth);
     DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
     DestLOI.Known.Zero &= ~Val;
     DestLOI.Known.One &= Val;
Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -882,8 +882,8 @@
 // We provide an Offset so that we can create bitwidths that won't overflow.
 static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
   unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
-  LHS = LHS.zextOrSelf(Bits);
-  RHS = RHS.zextOrSelf(Bits);
+  LHS = LHS.zext(Bits);
+  RHS = RHS.zext(Bits);
 }

 // Return true if this node is a setcc, or is a select_cc
@@ -4996,8 +4996,7 @@
       return 0;
     const APInt &C1 = N1C->getAPIntValue();
    const APInt &C2 = N3C->getAPIntValue();
-    if (C1.getBitWidth() < C2.getBitWidth() ||
-        C1 != C2.sextOrSelf(C1.getBitWidth()))
+    if (C1.getBitWidth() < C2.getBitWidth() || C1 != C2.sext(C1.getBitWidth()))
       return 0;
     return CC == ISD::SETLT ? ISD::SMIN : (CC == ISD::SETGT ? ISD::SMAX : 0);
   };
@@ -5104,7 +5103,7 @@
   const APInt &C1 = N1C->getAPIntValue();
   const APInt &C3 = N3C->getAPIntValue();
   if (!(C1 + 1).isPowerOf2() || C1.getBitWidth() < C3.getBitWidth() ||
-      C1 != C3.zextOrSelf(C1.getBitWidth()))
+      C1 != C3.zext(C1.getBitWidth()))
     return SDValue();

   unsigned BW = (C1 + 1).exactLogBase2();
Index: llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -7466,7 +7466,7 @@
   unsigned NumBits = Ty.getScalarSizeInBits();
   auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
   if (!Ty.isVector() && ValVRegAndVal) {
-    APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
+    APInt Scalar = ValVRegAndVal->Value.trunc(8);
     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
     return MIB.buildConstant(Ty, SplatVal).getReg(0);
   }
Index: llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
===================================================================
--- llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2874,7 +2874,7 @@
     assert(Size % 8 == 0);

     // Extend the element to take zero padding into account.
-    APInt Value = CI->getValue().zextOrSelf(Size);
+    APInt Value = CI->getValue().zext(Size);
     if (!Value.isSplat(8))
       return -1;

Index: llvm/lib/Analysis/ScalarEvolution.cpp
===================================================================
--- llvm/lib/Analysis/ScalarEvolution.cpp
+++ llvm/lib/Analysis/ScalarEvolution.cpp
@@ -9693,8 +9693,8 @@
 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
   if (X.hasValue() && Y.hasValue()) {
     unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
-    APInt XW = X->sextOrSelf(W);
-    APInt YW = Y->sextOrSelf(W);
+    APInt XW = X->sext(W);
+    APInt YW = Y->sext(W);
     return XW.slt(YW) ? *X : *Y;
   }
   if (!X.hasValue() && !Y.hasValue())
@@ -9846,8 +9846,8 @@
   std::tie(A, B, C, M, BitWidth) = *T;
   // Lower bound is inclusive, subtract 1 to represent the exiting value.
-  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
-  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
+  APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
+  APInt Upper = Range.getUpper().sext(A.getBitWidth());
   auto SL = SolveForBoundary(Lower);
   auto SU = SolveForBoundary(Upper);
   // If any of the solutions was unknown, no meaninigful conclusions can
Index: llvm/lib/Analysis/MemoryBuiltins.cpp
===================================================================
--- llvm/lib/Analysis/MemoryBuiltins.cpp
+++ llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -385,7 +385,7 @@
     if (!Arg)
       return None;

-    APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
+    APInt MaxSize = Arg->getValue().zext(IntTyBits);
     if (Size.ugt(MaxSize))
       Size = MaxSize + 1;
   }
Index: llvm/lib/Analysis/LazyValueInfo.cpp
===================================================================
--- llvm/lib/Analysis/LazyValueInfo.cpp
+++ llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1140,7 +1140,7 @@
       ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
       if (!CR.isEmptySet())
         return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
-            CR.getUnsignedMin().zextOrSelf(BitWidth), APInt(BitWidth, 0)));
+            CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
     }

     return ValueLatticeElement::getOverdefined();
Index: llvm/lib/Analysis/ConstantFolding.cpp
===================================================================
--- llvm/lib/Analysis/ConstantFolding.cpp
+++ llvm/lib/Analysis/ConstantFolding.cpp
@@ -91,7 +91,7 @@
       return ConstantExpr::getBitCast(C, DestTy);

     Result <<= BitShift;
-    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
+    Result |= ElementCI->getValue().zext(Result.getBitWidth());
   }

   return nullptr;
@@ -2878,11 +2878,11 @@
     unsigned Width = C0->getBitWidth();
     assert(Scale < Width && "Illegal scale.");
     unsigned ExtendedWidth = Width * 2;
-    APInt Product = (C0->sextOrSelf(ExtendedWidth) *
-                     C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
+    APInt Product =
+        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
     if (IntrinsicID == Intrinsic::smul_fix_sat) {
-      APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
-      APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
+      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
+      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
       Product = APIntOps::smin(Product, Max);
       Product = APIntOps::smax(Product, Min);
     }
Index: llvm/lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -659,8 +659,8 @@
     unsigned TypeSize =
         DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
     LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
-    Decomposed.Offset += LE.Offset.sextOrSelf(MaxIndexSize);
-    APInt Scale = LE.Scale.sextOrSelf(MaxIndexSize);
+    Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
+    APInt Scale = LE.Scale.sext(MaxIndexSize);

     // If we already had an occurrence of this index variable, merge this
     // scale into it.  For example, we want to handle:
Index: clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
===================================================================
--- clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -264,8 +264,8 @@
       Matches[0].getNodeAs<IntegerLiteral>("initNum")->getValue();
   auto CondOp = Matches[0].getNodeAs<BinaryOperator>("conditionOperator");
   if (InitNum.getBitWidth() != BoundNum.getBitWidth()) {
-    InitNum = InitNum.zextOrSelf(BoundNum.getBitWidth());
-    BoundNum = BoundNum.zextOrSelf(InitNum.getBitWidth());
+    InitNum = InitNum.zext(BoundNum.getBitWidth());
+    BoundNum = BoundNum.zext(InitNum.getBitWidth());
   }

   if (CondOp->getOpcode() == BO_GE || CondOp->getOpcode() == BO_LE)
Index: clang/lib/Sema/SemaDecl.cpp
===================================================================
--- clang/lib/Sema/SemaDecl.cpp
+++ clang/lib/Sema/SemaDecl.cpp
@@ -18710,7 +18710,7 @@
         const auto &EVal = E->getInitVal();
         // Only single-bit enumerators introduce new flag values.
         if (EVal.isPowerOf2())
-          FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
+          FlagBits = FlagBits.zext(EVal.getBitWidth()) | EVal;
       }
     }

Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -2002,7 +2002,7 @@
   // Signed overflow occurs if the result is greater than INT_MAX or lesser
   // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
   auto IntMax =
-      llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
+      llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
   llvm::Value *MaxResult =
       CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                             CGF.Builder.CreateZExt(IsNegative, OpTy));
Index: clang/lib/AST/MicrosoftMangle.cpp
===================================================================
--- clang/lib/AST/MicrosoftMangle.cpp
+++ clang/lib/AST/MicrosoftMangle.cpp
@@ -808,8 +808,8 @@
   // to convert every integer to signed 64 bit before mangling (including
   // unsigned 64 bit values). Do the same, but preserve bits beyond the bottom
   // 64.
-  llvm::APInt Value =
-      Number.isSigned() ? Number.sextOrSelf(64) : Number.zextOrSelf(64);
+  unsigned Width = std::max(Number.getBitWidth(), 64U);
+  llvm::APInt Value = Number.extend(Width);

   // <non-negative integer> ::= A@ # when Number == 0
   //                        ::= <decimal digit> # when 1 <= Number <= 10
Index: clang/lib/AST/ExprConstant.cpp
===================================================================
--- clang/lib/AST/ExprConstant.cpp
+++ clang/lib/AST/ExprConstant.cpp
@@ -8572,7 +8572,7 @@
     Into = ExprResult.Val.getInt();
     if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
       return false;
-    Into = Into.zextOrSelf(BitsInSizeT);
+    Into = Into.zext(BitsInSizeT);
     return true;
   };

@@ -9558,8 +9558,8 @@
       unsigned Bits =
          std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
-      llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits);
-      llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits);
+      llvm::APInt InitBound = CAT->getSize().zext(Bits);
+      llvm::APInt AllocBound = ArrayBound.zext(Bits);
       if (InitBound.ugt(AllocBound)) {
         if (IsNothrow)
           return ZeroInitialization(E);
@@ -10353,9 +10353,9 @@
     for (unsigned i = 0; i < NElts; i++) {
       llvm::APInt Elt;
       if (BigEndian)
-        Elt = SValInt.rotl(i*EltSize+FloatEltSize).truncOrSelf(FloatEltSize);
+        Elt = SValInt.rotl(i * EltSize + FloatEltSize).trunc(FloatEltSize);
       else
-        Elt = SValInt.rotr(i*EltSize).truncOrSelf(FloatEltSize);
+        Elt = SValInt.rotr(i * EltSize).trunc(FloatEltSize);
       Elts.push_back(APValue(APFloat(Sem, Elt)));
     }
   } else if (EltTy->isIntegerType()) {