Author: Jim Lin
Date: 2020-02-18T10:49:13+08:00
New Revision: 466f8843f526b03c8944a46af5ebb374133b5389
URL: https://github.com/llvm/llvm-project/commit/466f8843f526b03c8944a46af5ebb374133b5389
DIFF: https://github.com/llvm/llvm-project/commit/466f8843f526b03c8944a46af5ebb374133b5389.diff

LOG: [NFC] Remove trailing space
sed -Ei 's/[[:space:]]+$//' include/**/*.{def,h,td} lib/**/*.{cpp,h,td}

Added: 


Modified: 
    clang/lib/AST/DeclCXX.cpp
    clang/lib/AST/JSONNodeDumper.cpp
    clang/lib/AST/StmtProfile.cpp
    clang/lib/Basic/FileManager.cpp
    clang/lib/CodeGen/BackendUtil.cpp
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/lib/CodeGen/CGObjCGNU.cpp
    clang/lib/CodeGen/CGObjCRuntime.cpp
    clang/lib/Index/IndexDecl.cpp
    clang/lib/Parse/ParseExpr.cpp
    clang/lib/Parse/ParseExprCXX.cpp
    clang/lib/Parse/ParseTemplate.cpp
    clang/lib/Sema/SemaExpr.cpp
    clang/lib/Sema/SemaObjCProperty.cpp
    clang/lib/Sema/SemaTemplate.cpp
    clang/lib/Sema/SemaTemplateInstantiate.cpp
    clang/lib/Sema/SemaTemplateVariadic.cpp
    clang/lib/Serialization/ModuleManager.cpp
    llvm/include/llvm-c/Core.h
    llvm/lib/Analysis/AliasSetTracker.cpp
    llvm/lib/Analysis/GuardUtils.cpp
    llvm/lib/Analysis/Loads.cpp
    llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
    llvm/lib/Analysis/ScalarEvolution.cpp
    llvm/lib/Analysis/TargetLibraryInfo.cpp
    llvm/lib/Analysis/TargetTransformInfo.cpp
    llvm/lib/Analysis/VectorUtils.cpp
    llvm/lib/AsmParser/LLParser.cpp
    llvm/lib/CodeGen/CodeGenPrepare.cpp
    llvm/lib/CodeGen/GCRootLowering.cpp
    llvm/lib/CodeGen/StackMaps.cpp
    llvm/lib/CodeGen/TargetLoweringBase.cpp
    llvm/lib/CodeGen/ValueTypes.cpp
    llvm/lib/IR/Constants.cpp
    llvm/lib/IR/Core.cpp
    llvm/lib/IR/DiagnosticInfo.cpp
    llvm/lib/IR/Verifier.cpp
    llvm/lib/MC/XCOFFObjectWriter.cpp
    llvm/lib/Support/Host.cpp

Removed: 


################################################################################
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp index 931a1141b1b4..58e7e16d6817 100644 --- a/clang/lib/AST/DeclCXX.cpp +++ b/clang/lib/AST/DeclCXX.cpp @@ -663,7 +663,7 @@ bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const { // C++17 [expr.prim.lambda]p21: // The closure type associated with a lambda-expression has no default // constructor and a deleted copy assignment operator. - if (getLambdaCaptureDefault() != LCD_None || + if (getLambdaCaptureDefault() != LCD_None || getLambdaData().NumCaptures != 0) return false; return getASTContext().getLangOpts().CPlusPlus2a; @@ -2152,7 +2152,7 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base, return DevirtualizedMethod; // Similarly, if the class itself or its destructor is marked 'final', - // the class can't be derived from and we can therefore devirtualize the + // the class can't be derived from and we can therefore devirtualize the // member function call.
if (BestDynamicDecl->isEffectivelyFinal()) return DevirtualizedMethod; diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp index c30b07137edc..637b61e90a90 100644 --- a/clang/lib/AST/JSONNodeDumper.cpp +++ b/clang/lib/AST/JSONNodeDumper.cpp @@ -997,7 +997,7 @@ void JSONNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) { case ObjCPropertyDecl::Required: JOS.attribute("control", "required"); break; case ObjCPropertyDecl::Optional: JOS.attribute("control", "optional"); break; } - + ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes(); if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) { if (Attrs & ObjCPropertyDecl::OBJC_PR_getter) diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index 14ddc13ce561..76c5fe2e5402 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -1387,7 +1387,7 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) { ID.AddInteger(concepts::Requirement::RK_Nested); auto *NestedReq = cast<concepts::NestedRequirement>(Req); ID.AddBoolean(NestedReq->isSubstitutionFailure()); - if (!NestedReq->isSubstitutionFailure()) + if (!NestedReq->isSubstitutionFailure()) Visit(NestedReq->getConstraintExpr()); } } diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp index e4d019aedb7c..ac8af8fcaf4a 100644 --- a/clang/lib/Basic/FileManager.cpp +++ b/clang/lib/Basic/FileManager.cpp @@ -513,7 +513,7 @@ FileManager::getStatValue(StringRef Path, llvm::vfs::Status &Status, StatCache.get(), *FS); } -std::error_code +std::error_code FileManager::getNoncachedStatValue(StringRef Path, llvm::vfs::Status &Result) { SmallString<128> FilePath(Path); diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 48e26459e94f..6c71cf793c0f 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -351,7 +351,7 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple, break; case CodeGenOptions::MASSV: TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV); - break; + break; case CodeGenOptions::SVML: TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML); break; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index eb87180ee900..5e411bc7aa93 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -1629,7 +1629,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ceilf: case Builtin::BI__builtin_ceilf16: case Builtin::BI__builtin_ceill: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::ceil, Intrinsic::experimental_constrained_ceil)); @@ -1650,7 +1650,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosf: case Builtin::BI__builtin_cosf16: case Builtin::BI__builtin_cosl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::cos, Intrinsic::experimental_constrained_cos)); @@ -1661,7 +1661,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expf: case Builtin::BI__builtin_expf16: case Builtin::BI__builtin_expl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, 
Intrinsic::exp, Intrinsic::experimental_constrained_exp)); @@ -1672,7 +1672,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2f: case Builtin::BI__builtin_exp2f16: case Builtin::BI__builtin_exp2l: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::exp2, Intrinsic::experimental_constrained_exp2)); @@ -1693,7 +1693,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_floorf: case Builtin::BI__builtin_floorf16: case Builtin::BI__builtin_floorl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::floor, Intrinsic::experimental_constrained_floor)); @@ -1704,7 +1704,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaf: case Builtin::BI__builtin_fmaf16: case Builtin::BI__builtin_fmal: - return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::fma, Intrinsic::experimental_constrained_fma)); @@ -1715,7 +1715,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaxf: case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxl: - return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::maxnum, Intrinsic::experimental_constrained_maxnum)); @@ -1726,7 +1726,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fminf: case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminl: - return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::minnum, Intrinsic::experimental_constrained_minnum)); @@ -1751,7 +1751,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logf: case Builtin::BI__builtin_logf16: case Builtin::BI__builtin_logl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::log, Intrinsic::experimental_constrained_log)); @@ -1762,7 +1762,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10f: case Builtin::BI__builtin_log10f16: case Builtin::BI__builtin_log10l: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::log10, Intrinsic::experimental_constrained_log10)); @@ -1773,7 +1773,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2f: case Builtin::BI__builtin_log2f16: case Builtin::BI__builtin_log2l: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::log2, Intrinsic::experimental_constrained_log2)); @@ -1783,7 +1783,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_nearbyint: case Builtin::BI__builtin_nearbyintf: case Builtin::BI__builtin_nearbyintl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return 
RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::nearbyint, Intrinsic::experimental_constrained_nearbyint)); @@ -1794,7 +1794,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_powf: case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powl: - return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::pow, Intrinsic::experimental_constrained_pow)); @@ -1805,7 +1805,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_rintf: case Builtin::BI__builtin_rintf16: case Builtin::BI__builtin_rintl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::rint, Intrinsic::experimental_constrained_rint)); @@ -1816,7 +1816,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_roundf: case Builtin::BI__builtin_roundf16: case Builtin::BI__builtin_roundl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::round, Intrinsic::experimental_constrained_round)); @@ -1827,7 +1827,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinf: case Builtin::BI__builtin_sinf16: case Builtin::BI__builtin_sinl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::sin, Intrinsic::experimental_constrained_sin)); @@ -1838,7 +1838,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtf: case Builtin::BI__builtin_sqrtf16: case Builtin::BI__builtin_sqrtl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt)); @@ -1849,7 +1849,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_truncf: case Builtin::BI__builtin_truncf16: case Builtin::BI__builtin_truncl: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, Intrinsic::trunc, Intrinsic::experimental_constrained_trunc)); diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp index 61801e0e815b..db78309e9fd9 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -821,7 +821,7 @@ class CGObjCGNUstep : public CGObjCGNU { // Slot_t objc_slot_lookup_super(struct objc_super*, SEL); SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy, PtrToObjCSuperTy, SelectorTy); - // If we're in ObjC++ mode, then we want to make + // If we're in ObjC++ mode, then we want to make if (usesSEHExceptions) { llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext); // void objc_exception_rethrow(void) @@ -1657,7 +1657,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep { b.CreateRetVoid(); // We can't use the normal LLVM global initialisation array, because we // need to specify that this runs early in library initialisation. 
- auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), + auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), /*isConstant*/true, llvm::GlobalValue::InternalLinkage, Init, ".objc_early_init_ptr"); InitVar->setSection(".CRT$XCLb"); diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp index f8b831d0e9be..c34758c7e3b3 100644 --- a/clang/lib/CodeGen/CGObjCRuntime.cpp +++ b/clang/lib/CodeGen/CGObjCRuntime.cpp @@ -211,7 +211,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF, CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc); } - + // Emit the try body. CGF.EmitStmt(S.getTryBody()); @@ -271,7 +271,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF, cleanups.ForceCleanup(); CGF.EmitBranchThroughCleanup(Cont); - } + } // Go back to the try-statement fallthrough. CGF.Builder.restoreIP(SavedIP); diff --git a/clang/lib/Index/IndexDecl.cpp b/clang/lib/Index/IndexDecl.cpp index 2002c695a9b1..68160bc59eb6 100644 --- a/clang/lib/Index/IndexDecl.cpp +++ b/clang/lib/Index/IndexDecl.cpp @@ -80,7 +80,7 @@ class IndexingDeclVisitor : public ConstDeclVisitor<IndexingDeclVisitor, bool> { !MD->isSynthesizedAccessorStub(); } - + void handleDeclarator(const DeclaratorDecl *D, const NamedDecl *Parent = nullptr, bool isIBType = false) { diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp index e0c53df992e8..584de6b87d90 100644 --- a/clang/lib/Parse/ParseExpr.cpp +++ b/clang/lib/Parse/ParseExpr.cpp @@ -1006,7 +1006,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind, assert(Tok.isNot(tok::kw_decltype) && Tok.isNot(tok::kw___super)); return ParseCastExpression(ParseKind, isAddressOfOperand, isTypeCast, isVectorLiteral, NotPrimaryExpression); - + case tok::identifier: { // primary-expression: identifier // unqualified-id: identifier // constant: enumeration-constant @@ -2690,7 +2690,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, PreferredType.enterTypeCast(Tok.getLocation(), Ty.get().get()); ExprResult SubExpr = ParseCastExpression(AnyCastExpr); - + if (Ty.isInvalid() || SubExpr.isInvalid()) return ExprError(); diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp index 550bf7045425..10608644a8fe 100644 --- a/clang/lib/Parse/ParseExprCXX.cpp +++ b/clang/lib/Parse/ParseExprCXX.cpp @@ -1302,7 +1302,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer( ParseParameterDeclarationClause(D.getContext(), Attr, ParamInfo, EllipsisLoc); - // For a generic lambda, each 'auto' within the parameter declaration + // For a generic lambda, each 'auto' within the parameter declaration // clause creates a template type parameter, so increment the depth. // If we've parsed any explicit template parameters, then the depth will // have already been incremented. So we make sure that at most a single diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp index 8debebc7702a..53c4829f43f4 100644 --- a/clang/lib/Parse/ParseTemplate.cpp +++ b/clang/lib/Parse/ParseTemplate.cpp @@ -986,7 +986,7 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) { // Create the parameter. 
return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl, - Depth, Position, EqualLoc, + Depth, Position, EqualLoc, DefaultArg.get()); } diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index e7cb2045f19b..3a1865124199 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -1916,7 +1916,7 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, bool RefersToCapturedVariable = isa<VarDecl>(D) && NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc()); - + DeclRefExpr *E = DeclRefExpr::Create( Context, NNS, TemplateKWLoc, D, RefersToCapturedVariable, NameInfo, Ty, VK, FoundD, TemplateArgs, getNonOdrUseReasonInCurrentContext(D)); diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp index 305b14d55b33..9c7d8ecf7f9b 100644 --- a/clang/lib/Sema/SemaObjCProperty.cpp +++ b/clang/lib/Sema/SemaObjCProperty.cpp @@ -1456,7 +1456,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, PropertyLoc); PIDecl->setGetterMethodDecl(OMD); } - + if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr && Ivar->getType()->isRecordType()) { // For Objective-C++, need to synthesize the AST for the IVAR object to be diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp index 1188017972cc..c1084b875a92 100755 --- a/clang/lib/Sema/SemaTemplate.cpp +++ b/clang/lib/Sema/SemaTemplate.cpp @@ -1288,11 +1288,11 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, // Check that we have valid decl-specifiers specified. auto CheckValidDeclSpecifiers = [this, &D] { // C++ [temp.param] - // p1 + // p1 // template-parameter: // ... // parameter-declaration - // p2 + // p2 // ... A storage class shall not be specified in a template-parameter // declaration. // [dcl.typedef]p1: @@ -8341,7 +8341,7 @@ Decl *Sema::ActOnConceptDefinition(Scope *S, ConceptDecl *NewDecl = ConceptDecl::Create(Context, DC, NameLoc, Name, TemplateParameterLists.front(), ConstraintExpr); - + if (NewDecl->hasAssociatedConstraints()) { // C++2a [temp.concept]p4: // A concept shall not have associated constraints. diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp index 568f5404dc0b..3ae2822a9803 100644 --- a/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -761,7 +761,7 @@ void Sema::PrintInstantiationStack() { case CodeSynthesisContext::Memoization: break; - + case CodeSynthesisContext::ConstraintsCheck: { unsigned DiagID = 0; if (!Active->Entity) { diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp index d947d6d282be..825b062c0054 100644 --- a/clang/lib/Sema/SemaTemplateVariadic.cpp +++ b/clang/lib/Sema/SemaTemplateVariadic.cpp @@ -940,7 +940,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) { if (Expr *TRC = D.getTrailingRequiresClause()) if (TRC->containsUnexpandedParameterPack()) return true; - + return false; } diff --git a/clang/lib/Serialization/ModuleManager.cpp b/clang/lib/Serialization/ModuleManager.cpp index 7406c8795fe4..9b326d26eb84 100644 --- a/clang/lib/Serialization/ModuleManager.cpp +++ b/clang/lib/Serialization/ModuleManager.cpp @@ -436,7 +436,7 @@ bool ModuleManager::lookupModuleFile(StringRef FileName, // Open the file immediately to ensure there is no race between stat'ing and // opening the file. 
- auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true, + auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true, /*CacheFailure=*/false); if (!FileOrErr) { File = nullptr; diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h index 7a39731d3e0c..852e16400f3a 100644 --- a/llvm/include/llvm-c/Core.h +++ b/llvm/include/llvm-c/Core.h @@ -2690,7 +2690,7 @@ LLVMValueRef LLVMGetNextGlobalIFunc(LLVMValueRef IFunc); * no previous global aliases. */ LLVMValueRef LLVMGetPreviousGlobalIFunc(LLVMValueRef IFunc); - + /** * Retrieves the resolver function associated with this indirect function, or * NULL if it doesn't not exist. @@ -2944,7 +2944,7 @@ void LLVMInsertExistingBasicBlockAfterInsertBlock(LLVMBuilderRef Builder, */ void LLVMAppendExistingBasicBlock(LLVMValueRef Fn, LLVMBasicBlockRef BB); - + /** * Create a new basic block without inserting it into a function. * @@ -3755,7 +3755,7 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef, LLVMTypeRef Ty, LLVMValueRef Val, const char *Name); /** - * Creates and inserts a memset to the specified pointer and the + * Creates and inserts a memset to the specified pointer and the * specified value. * * @see llvm::IRRBuilder::CreateMemSet() @@ -3768,7 +3768,7 @@ LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, * * @see llvm::IRRBuilder::CreateMemCpy() */ -LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, +LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign, LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size); @@ -3777,7 +3777,7 @@ LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, * * @see llvm::IRRBuilder::CreateMemMove() */ -LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B, +LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign, LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size); diff --git a/llvm/lib/Analysis/AliasSetTracker.cpp b/llvm/lib/Analysis/AliasSetTracker.cpp index 5cc5ab597ef9..5cc68f05dc0e 100644 --- a/llvm/lib/Analysis/AliasSetTracker.cpp +++ b/llvm/lib/Analysis/AliasSetTracker.cpp @@ -677,7 +677,7 @@ void AliasSet::print(raw_ostream &OS) const { I.getPointer()->printAsOperand(OS << "("); if (I.getSize() == LocationSize::unknown()) OS << ", unknown)"; - else + else OS << ", " << I.getSize() << ")"; } } diff --git a/llvm/lib/Analysis/GuardUtils.cpp b/llvm/lib/Analysis/GuardUtils.cpp index d48283279858..cd132c56991f 100644 --- a/llvm/lib/Analysis/GuardUtils.cpp +++ b/llvm/lib/Analysis/GuardUtils.cpp @@ -47,7 +47,7 @@ bool llvm::parseWidenableBranch(const User *U, Value *&Condition, Use *C, *WC; if (parseWidenableBranch(const_cast<User*>(U), C, WC, IfTrueBB, IfFalseBB)) { - if (C) + if (C) Condition = C->get(); else Condition = ConstantInt::getTrue(IfTrueBB->getContext()); @@ -66,10 +66,10 @@ bool llvm::parseWidenableBranch(User *U, Use *&C,Use *&WC, auto *Cond = BI->getCondition(); if (!Cond->hasOneUse()) return false; - + IfTrueBB = BI->getSuccessor(0); IfFalseBB = BI->getSuccessor(1); - + if (match(Cond, m_Intrinsic<Intrinsic::experimental_widenable_condition>())) { WC = &BI->getOperandUse(0); C = nullptr; @@ -88,7 +88,7 @@ bool llvm::parseWidenableBranch(User *U, Use *&C,Use *&WC, if (!And) // Could be a constexpr return false; - + if (match(A, m_Intrinsic<Intrinsic::experimental_widenable_condition>()) && A->hasOneUse()) { WC = &And->getOperandUse(0); diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp index c2e9b8b882e4..b2d20cef04a5 100644 --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -78,7 
+78,7 @@ static bool isDereferenceableAndAlignedPointer( if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) { // As we recursed through GEPs to get here, we've incrementally checked // that each step advanced by a multiple of the alignment. If our base is - // properly aligned, then the original offset accessed must also be. + // properly aligned, then the original offset accessed must also be. Type *Ty = V->getType(); assert(Ty->isSized() && "must be sized"); APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0); @@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, // are dereferenced, so bail out. if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable())) return false; - + // When dereferenceability information is provided by a dereferenceable // attribute, we know exactly how many bytes are dereferenceable. If we can // determine the exact offset to the attributed variable, we can use that diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp index 3dddf4b7d60a..1ff47e10bd99 100644 --- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp +++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp @@ -83,7 +83,7 @@ cl::opt<std::string> ModuleSummaryDotFile( // to know when computing summary for global var, because if global variable // references basic block address we can't import it separately from function // containing that basic block. For simplicity we currently don't import such -// global vars at all. When importing function we aren't interested if any +// global vars at all. When importing function we aren't interested if any // instruction in it takes an address of any basic block, because instruction // can only take an address of basic block located in the same function. static bool findRefEdges(ModuleSummaryIndex &Index, const User *CurUser, diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index 56c1c514ed8a..cd74815a895e 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -6640,7 +6640,7 @@ const SCEV *ScalarEvolution::getExitCount(const Loop *L, BasicBlock *ExitingBlock, ExitCountKind Kind) { switch (Kind) { - case Exact: + case Exact: return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); case ConstantMaximum: return getBackedgeTakenInfo(L).getMax(ExitingBlock, this); @@ -6657,7 +6657,7 @@ ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, ExitCountKind Kind) { switch (Kind) { - case Exact: + case Exact: return getBackedgeTakenInfo(L).getExact(L, this); case ConstantMaximum: return getBackedgeTakenInfo(L).getMax(this); diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp index 9f6ed75cd8d4..2c4809b201ee 100644 --- a/llvm/lib/Analysis/TargetLibraryInfo.cpp +++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp @@ -1488,9 +1488,9 @@ bool TargetLibraryInfoImpl::getLibFunc(const Function &FDecl, LibFunc &F) const { // Intrinsics don't overlap w/libcalls; if our module has a large number of // intrinsics, this ends up being an interesting compile time win since we - // avoid string normalization and comparison. + // avoid string normalization and comparison. if (FDecl.isIntrinsic()) return false; - + const DataLayout *DL = FDecl.getParent() ? 
&FDecl.getParent()->getDataLayout() : nullptr; return getLibFunc(FDecl.getName(), F) && diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 62c021435b33..865f8975825a 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -47,7 +47,7 @@ struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> { bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) { // If the loop has irreducible control flow, it can not be converted to // Hardware loop. - LoopBlocksRPO RPOT(L); + LoopBlocksRPO RPOT(L); RPOT.perform(&LI); if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI)) return false; diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp index d2c521ac9c9d..9bdf0f334d2f 100644 --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -684,7 +684,7 @@ llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF, return ConstantVector::get(Mask); } -Constant *llvm::createReplicatedMask(IRBuilder<> &Builder, +Constant *llvm::createReplicatedMask(IRBuilder<> &Builder, unsigned ReplicationFactor, unsigned VF) { SmallVector<Constant *, 16> MaskVec; for (unsigned i = 0; i < VF; i++) @@ -951,7 +951,7 @@ void InterleavedAccessInfo::analyzeInterleaving( // create a group for B, we continue with the bottom-up algorithm to ensure // we don't break any of B's dependences. InterleaveGroup<Instruction> *Group = nullptr; - if (isStrided(DesB.Stride) && + if (isStrided(DesB.Stride) && (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) { Group = getInterleaveGroup(B); if (!Group) { @@ -1052,8 +1052,8 @@ void InterleavedAccessInfo::analyzeInterleaving( // All members of a predicated interleave-group must have the same predicate, // and currently must reside in the same BB. - BasicBlock *BlockA = A->getParent(); - BasicBlock *BlockB = B->getParent(); + BasicBlock *BlockA = A->getParent(); + BasicBlock *BlockB = B->getParent(); if ((isPredicated(BlockA) || isPredicated(BlockB)) && (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB)) continue; diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index b22e7cb4f581..d8e514112aab 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -3416,7 +3416,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) { ID.Kind = ValID::t_Constant; return false; } - + // Unary Operators. case lltok::kw_fneg: { unsigned Opc = Lex.getUIntVal(); @@ -3426,7 +3426,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) { ParseGlobalTypeAndValue(Val) || ParseToken(lltok::rparen, "expected ')' in unary constantexpr")) return true; - + // Check that the type is valid for the operator. 
switch (Opc) { case Instruction::FNeg: @@ -4764,7 +4764,7 @@ bool LLParser::ParseDICommonBlock(MDNode *&Result, bool IsDistinct) { OPTIONAL(declaration, MDField, ); \ OPTIONAL(name, MDStringField, ); \ OPTIONAL(file, MDField, ); \ - OPTIONAL(line, LineField, ); + OPTIONAL(line, LineField, ); PARSE_MD_FIELDS(); #undef VISIT_MD_FIELDS diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index ce758d698c96..306f9dcd91c9 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -1953,7 +1953,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { case Intrinsic::experimental_widenable_condition: { // Give up on future widening oppurtunties so that we can fold away dead // paths and merge blocks before going into block-local instruction - // selection. + // selection. if (II->use_empty()) { II->eraseFromParent(); return true; diff --git a/llvm/lib/CodeGen/GCRootLowering.cpp b/llvm/lib/CodeGen/GCRootLowering.cpp index 90e5f32f53b3..2a85048cc979 100644 --- a/llvm/lib/CodeGen/GCRootLowering.cpp +++ b/llvm/lib/CodeGen/GCRootLowering.cpp @@ -189,12 +189,12 @@ bool LowerIntrinsics::runOnFunction(Function &F) { /// need to be able to ensure each root has been initialized by the point the /// first safepoint is reached. This really should have been done by the /// frontend, but the old API made this non-obvious, so we do a potentially -/// redundant store just in case. +/// redundant store just in case. bool LowerIntrinsics::DoLowering(Function &F, GCStrategy &S) { SmallVector<AllocaInst *, 32> Roots; bool MadeChange = false; - for (BasicBlock &BB : F) + for (BasicBlock &BB : F) for (BasicBlock::iterator II = BB.begin(), E = BB.end(); II != E;) { IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++); if (!CI) diff --git a/llvm/lib/CodeGen/StackMaps.cpp b/llvm/lib/CodeGen/StackMaps.cpp index 2ef71d6f59f7..daf3fd36c006 100644 --- a/llvm/lib/CodeGen/StackMaps.cpp +++ b/llvm/lib/CodeGen/StackMaps.cpp @@ -300,7 +300,7 @@ void StackMaps::recordStackMapOpers(const MCSymbol &MILabel, MachineInstr::const_mop_iterator MOE, bool recordResult) { MCContext &OutContext = AP.OutStreamer->getContext(); - + LocationVec Locations; LiveOutVec LiveOuts; diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp index 3a3bb8cd2405..bd717a8585ee 100644 --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -1066,7 +1066,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI, MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI)); MIB->addMemOperand(MF, MMO); } - + // Replace the instruction and update the operand index. 
MBB->insert(MachineBasicBlock::iterator(MI), MIB); OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1; diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp index 41cbdf035558..264982983fc8 100644 --- a/llvm/lib/CodeGen/ValueTypes.cpp +++ b/llvm/lib/CodeGen/ValueTypes.cpp @@ -230,89 +230,89 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const { case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2); case MVT::v4f64: return VectorType::get(Type::getDoubleTy(Context), 4); case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8); - case MVT::nxv1i1: + case MVT::nxv1i1: return VectorType::get(Type::getInt1Ty(Context), 1, /*Scalable=*/ true); - case MVT::nxv2i1: + case MVT::nxv2i1: return VectorType::get(Type::getInt1Ty(Context), 2, /*Scalable=*/ true); - case MVT::nxv4i1: + case MVT::nxv4i1: return VectorType::get(Type::getInt1Ty(Context), 4, /*Scalable=*/ true); - case MVT::nxv8i1: + case MVT::nxv8i1: return VectorType::get(Type::getInt1Ty(Context), 8, /*Scalable=*/ true); - case MVT::nxv16i1: + case MVT::nxv16i1: return VectorType::get(Type::getInt1Ty(Context), 16, /*Scalable=*/ true); - case MVT::nxv32i1: + case MVT::nxv32i1: return VectorType::get(Type::getInt1Ty(Context), 32, /*Scalable=*/ true); - case MVT::nxv1i8: + case MVT::nxv1i8: return VectorType::get(Type::getInt8Ty(Context), 1, /*Scalable=*/ true); - case MVT::nxv2i8: + case MVT::nxv2i8: return VectorType::get(Type::getInt8Ty(Context), 2, /*Scalable=*/ true); - case MVT::nxv4i8: + case MVT::nxv4i8: return VectorType::get(Type::getInt8Ty(Context), 4, /*Scalable=*/ true); - case MVT::nxv8i8: + case MVT::nxv8i8: return VectorType::get(Type::getInt8Ty(Context), 8, /*Scalable=*/ true); - case MVT::nxv16i8: + case MVT::nxv16i8: return VectorType::get(Type::getInt8Ty(Context), 16, /*Scalable=*/ true); - case MVT::nxv32i8: + case MVT::nxv32i8: return VectorType::get(Type::getInt8Ty(Context), 32, /*Scalable=*/ true); - case MVT::nxv1i16: + case MVT::nxv1i16: return VectorType::get(Type::getInt16Ty(Context), 1, /*Scalable=*/ true); - case MVT::nxv2i16: + case MVT::nxv2i16: return VectorType::get(Type::getInt16Ty(Context), 2, /*Scalable=*/ true); - case MVT::nxv4i16: + case MVT::nxv4i16: return VectorType::get(Type::getInt16Ty(Context), 4, /*Scalable=*/ true); - case MVT::nxv8i16: + case MVT::nxv8i16: return VectorType::get(Type::getInt16Ty(Context), 8, /*Scalable=*/ true); case MVT::nxv16i16: return VectorType::get(Type::getInt16Ty(Context), 16, /*Scalable=*/ true); case MVT::nxv32i16: return VectorType::get(Type::getInt16Ty(Context), 32, /*Scalable=*/ true); - case MVT::nxv1i32: + case MVT::nxv1i32: return VectorType::get(Type::getInt32Ty(Context), 1, /*Scalable=*/ true); - case MVT::nxv2i32: + case MVT::nxv2i32: return VectorType::get(Type::getInt32Ty(Context), 2, /*Scalable=*/ true); - case MVT::nxv4i32: + case MVT::nxv4i32: return VectorType::get(Type::getInt32Ty(Context), 4, /*Scalable=*/ true); - case MVT::nxv8i32: + case MVT::nxv8i32: return VectorType::get(Type::getInt32Ty(Context), 8, /*Scalable=*/ true); case MVT::nxv16i32: return VectorType::get(Type::getInt32Ty(Context), 16,/*Scalable=*/ true); case MVT::nxv32i32: return VectorType::get(Type::getInt32Ty(Context), 32,/*Scalable=*/ true); - case MVT::nxv1i64: + case MVT::nxv1i64: return VectorType::get(Type::getInt64Ty(Context), 1, /*Scalable=*/ true); - case MVT::nxv2i64: + case MVT::nxv2i64: return VectorType::get(Type::getInt64Ty(Context), 2, /*Scalable=*/ true); - case MVT::nxv4i64: + case MVT::nxv4i64: 
return VectorType::get(Type::getInt64Ty(Context), 4, /*Scalable=*/ true); - case MVT::nxv8i64: + case MVT::nxv8i64: return VectorType::get(Type::getInt64Ty(Context), 8, /*Scalable=*/ true); case MVT::nxv16i64: return VectorType::get(Type::getInt64Ty(Context), 16, /*Scalable=*/ true); case MVT::nxv32i64: return VectorType::get(Type::getInt64Ty(Context), 32, /*Scalable=*/ true); - case MVT::nxv2f16: + case MVT::nxv2f16: return VectorType::get(Type::getHalfTy(Context), 2, /*Scalable=*/ true); - case MVT::nxv4f16: + case MVT::nxv4f16: return VectorType::get(Type::getHalfTy(Context), 4, /*Scalable=*/ true); - case MVT::nxv8f16: + case MVT::nxv8f16: return VectorType::get(Type::getHalfTy(Context), 8, /*Scalable=*/ true); - case MVT::nxv1f32: + case MVT::nxv1f32: return VectorType::get(Type::getFloatTy(Context), 1, /*Scalable=*/ true); - case MVT::nxv2f32: + case MVT::nxv2f32: return VectorType::get(Type::getFloatTy(Context), 2, /*Scalable=*/ true); - case MVT::nxv4f32: + case MVT::nxv4f32: return VectorType::get(Type::getFloatTy(Context), 4, /*Scalable=*/ true); - case MVT::nxv8f32: + case MVT::nxv8f32: return VectorType::get(Type::getFloatTy(Context), 8, /*Scalable=*/ true); case MVT::nxv16f32: return VectorType::get(Type::getFloatTy(Context), 16, /*Scalable=*/ true); - case MVT::nxv1f64: + case MVT::nxv1f64: return VectorType::get(Type::getDoubleTy(Context), 1, /*Scalable=*/ true); - case MVT::nxv2f64: + case MVT::nxv2f64: return VectorType::get(Type::getDoubleTy(Context), 2, /*Scalable=*/ true); - case MVT::nxv4f64: + case MVT::nxv4f64: return VectorType::get(Type::getDoubleTy(Context), 4, /*Scalable=*/ true); - case MVT::nxv8f64: + case MVT::nxv8f64: return VectorType::get(Type::getDoubleTy(Context), 8, /*Scalable=*/ true); case MVT::Metadata: return Type::getMetadataTy(Context); } diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index 79c302867369..399bd41c82b2 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -826,10 +826,10 @@ Constant *ConstantFP::getQNaN(Type *Ty, bool Negative, APInt *Payload) { const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType()); APFloat NaN = APFloat::getQNaN(Semantics, Negative, Payload); Constant *C = get(Ty->getContext(), NaN); - + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); - + return C; } @@ -837,10 +837,10 @@ Constant *ConstantFP::getSNaN(Type *Ty, bool Negative, APInt *Payload) { const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType()); APFloat NaN = APFloat::getSNaN(Semantics, Negative, Payload); Constant *C = get(Ty->getContext(), NaN); - + if (VectorType *VTy = dyn_cast<VectorType>(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); - + return C; } @@ -1908,7 +1908,7 @@ Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy, return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced); } -Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags, +Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags, Type *OnlyIfReducedTy) { // Check the operands for consistency first. 
assert(Instruction::isUnaryOp(Opcode) && diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index 3b4224d78a2d..313bc65e775c 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -3436,14 +3436,14 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty, return wrap(unwrap(B)->Insert(Malloc, Twine(Name))); } -LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, +LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, LLVMValueRef Val, LLVMValueRef Len, unsigned Align) { return wrap(unwrap(B)->CreateMemSet(unwrap(Ptr), unwrap(Val), unwrap(Len), MaybeAlign(Align))); } -LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, +LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign, LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size) { diff --git a/llvm/lib/IR/DiagnosticInfo.cpp b/llvm/lib/IR/DiagnosticInfo.cpp index fda541a296a1..6528c723fbfa 100644 --- a/llvm/lib/IR/DiagnosticInfo.cpp +++ b/llvm/lib/IR/DiagnosticInfo.cpp @@ -119,7 +119,7 @@ DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) { DiagnosticLocation::DiagnosticLocation(const DISubprogram *SP) { if (!SP) return; - + File = SP->getFile(); Line = SP->getScopeLine(); Column = 0; diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index cf8e73f30225..6af581b178dc 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -3174,7 +3174,7 @@ void Verifier::visitInvokeInst(InvokeInst &II) { /// visitUnaryOperator - Check the argument to the unary operator. /// void Verifier::visitUnaryOperator(UnaryOperator &U) { - Assert(U.getType() == U.getOperand(0)->getType(), + Assert(U.getType() == U.getOperand(0)->getType(), "Unary operators must have same type for" "operands and result!", &U); @@ -4813,7 +4813,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) { Type *ResultTy = FPI.getType(); Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(), "Intrinsic does not support vectors", &FPI); - } + } break; case Intrinsic::experimental_constrained_lround: @@ -4823,7 +4823,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) { Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(), "Intrinsic does not support vectors", &FPI); break; - } + } case Intrinsic::experimental_constrained_fcmp: case Intrinsic::experimental_constrained_fcmps: { @@ -4834,7 +4834,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) { } case Intrinsic::experimental_constrained_fptosi: - case Intrinsic::experimental_constrained_fptoui: { + case Intrinsic::experimental_constrained_fptoui: { Value *Operand = FPI.getArgOperand(0); uint64_t NumSrcElem = 0; Assert(Operand->getType()->isFPOrFPVectorTy(), @@ -4906,7 +4906,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) { "Intrinsic first argument's type must be smaller than result type", &FPI); } - } + } break; default: @@ -5172,7 +5172,7 @@ struct VerifierLegacyPass : public FunctionPass { bool runOnFunction(Function &F) override { if (!V->verify(F) && FatalErrors) { - errs() << "in function " << F.getName() << '\n'; + errs() << "in function " << F.getName() << '\n'; report_fatal_error("Broken function found, compilation aborted!"); } return false; diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp index acb0e77807d6..67202833cfba 100644 --- a/llvm/lib/MC/XCOFFObjectWriter.cpp +++ b/llvm/lib/MC/XCOFFObjectWriter.cpp @@ -764,7 +764,7 @@ void XCOFFObjectWriter::assignAddressesAndIndices(const 
MCAsmLayout &Layout) { SymbolIndexMap[MCSec->getQualNameSymbol()] = Csect.SymbolTableIndex; // 1 main and 1 auxiliary symbol table entry for the csect. SymbolTableIndex += 2; - + for (auto &Sym : Csect.Syms) { Sym.SymbolTableIndex = SymbolTableIndex; SymbolIndexMap[Sym.MCSym] = Sym.SymbolTableIndex; diff --git a/llvm/lib/Support/Host.cpp b/llvm/lib/Support/Host.cpp index cafdc2ff380c..955c1b306296 100644 --- a/llvm/lib/Support/Host.cpp +++ b/llvm/lib/Support/Host.cpp @@ -1255,7 +1255,7 @@ StringRef sys::getHostCPUName() { return "swift"; default:; } - + return "generic"; } #else
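Editor's note on reproducing the cleanup: the `**` patterns in the sed command in the log message above rely on recursive globbing (zsh, or bash with `shopt -s globstar`); in plain bash, `**` behaves like a single `*`, so files in nested directories would be missed. A rough, shell-agnostic equivalent is sketched below. This is only a sketch under assumptions not stated in the commit: GNU sed (for `-E`/`-i`) and running from the same source directories the log names.

  # Strip trailing whitespace from the same file kinds the commit touched.
  # Assumes GNU sed; BSD sed needs `-i ''` instead of `-i`.
  find include -type f \( -name '*.def' -o -name '*.h' -o -name '*.td' \) \
       -exec sed -Ei 's/[[:space:]]+$//' {} +
  find lib -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.td' \) \
       -exec sed -Ei 's/[[:space:]]+$//' {} +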