xbolva00 updated this revision to Diff 402994.
xbolva00 retitled this revision from "[AlwaysInliner] Enable call site inlining to make flatten attribute working again (PR53360)" to "[AlwaysInliner] Enable call site inlining to make flatten attribute working again (#53360)".
Herald added a subscriber: pengfei.

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D117965/new/

https://reviews.llvm.org/D117965

Files:
  clang/test/CodeGen/flatten.c
  clang/test/CodeGenCXX/flatten.cpp
  llvm/lib/Transforms/IPO/AlwaysInliner.cpp
  llvm/test/CodeGen/X86/x86-cmov-converter.ll
  llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll
  llvm/test/Transforms/Inline/always-inline.ll
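
For context while reading the diff below: the patch makes the new-PM AlwaysInliner honor the alwaysinline attribute on call sites, which is how Clang lowers the flatten attribute. The sketch below is illustrative only (it is not part of the patch; the function names f/g are made up) and shows the shape of code that regressed under the new pass manager and works again with this change:

  // Illustrative example (not from the patch): Clang lowers
  // __attribute__((flatten)) on g() by adding the alwaysinline attribute to
  // the call sites inside g(), not to the callee f(). The new-PM always
  // inliner previously only looked at the callee's function attribute, so
  // f() was left out-of-line in g(); with this change the call-site
  // attribute is honored again.
  void f(void) {}

  __attribute__((flatten)) void g(void) {
    f(); // emitted as a call carrying the alwaysinline call-site attribute
  }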

Index: llvm/test/Transforms/Inline/always-inline.ll
===================================================================
--- llvm/test/Transforms/Inline/always-inline.ll
+++ llvm/test/Transforms/Inline/always-inline.ll
@@ -1,14 +1,11 @@
-; RUN: opt < %s -inline-threshold=0 -always-inline -enable-new-pm=0 -S | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-CALL
+; RUN: opt < %s -inline-threshold=0 -always-inline -enable-new-pm=0 -S | FileCheck %s --check-prefix=CHECK
 ;
 ; Ensure the threshold has no impact on these decisions.
-; RUN: opt < %s -inline-threshold=20000000 -always-inline -enable-new-pm=0 -S | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-CALL
-; RUN: opt < %s -inline-threshold=-20000000 -always-inline -enable-new-pm=0 -S | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-CALL
+; RUN: opt < %s -inline-threshold=20000000 -always-inline -enable-new-pm=0 -S | FileCheck %s --check-prefix=CHECK
+; RUN: opt < %s -inline-threshold=-20000000 -always-inline -enable-new-pm=0 -S | FileCheck %s --check-prefix=CHECK
 ;
 ; The new pass manager doesn't re-use any threshold based infrastructure for
-; the always inliner, but test that we get the correct result. The new PM
-; always inliner also doesn't support inlining call-site alwaysinline
-; annotations. It isn't clear that this is a reasonable use case for
-; 'alwaysinline'.
+; the always inliner, but test that we get the correct result.
 ; RUN: opt < %s -inline-threshold=0 -passes=always-inline -S | FileCheck %s --check-prefix=CHECK
 ; RUN: opt < %s -inline-threshold=20000000 -passes=always-inline -S | FileCheck %s --check-prefix=CHECK
 ; RUN: opt < %s -inline-threshold=-20000000 -passes=always-inline -S | FileCheck %s --check-prefix=CHECK
@@ -26,12 +23,6 @@
    ret i32 %r
 }
 
-; The always inliner can't DCE arbitrary internal functions. PR2945
-define internal i32 @pr2945() nounwind {
-; CHECK-LABEL: @pr2945(
-  ret i32 0
-}
-
 define internal void @inner2(i32 %N) alwaysinline {
 ; CHECK-NOT: @inner2(
   %P = alloca i32, i32 %N
@@ -146,10 +137,9 @@
   ret i32 1
 }
 define i32 @outer7() {
-; CHECK-CALL-LABEL: @outer7(
-; CHECK-CALL-NOT: call
-; CHECK-CALL: ret
-
+; CHECK-LABEL: @outer7(
+; CHECK-NOT: call
+; CHECK: ret
    %r = call i32 @inner7() alwaysinline
    ret i32 %r
 }
Index: llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll
===================================================================
--- llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll
+++ llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll
@@ -3,9 +3,7 @@
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.12.0"
 
-; CHECK: define internal { i8*, i32 } @f(i8* %buffer, i32* %array)
-; CHECK-NEXT: entry:
-; CHECK-NEXT:  unreachable
+; CHECK-NOT: define internal { i8*, i32 } @f(i8* %buffer, i32* %array)
 
 define internal {i8*, i32} @f(i8* %buffer, i32* %array) {
 entry:
Index: llvm/test/CodeGen/X86/x86-cmov-converter.ll
===================================================================
--- llvm/test/CodeGen/X86/x86-cmov-converter.ll
+++ llvm/test/CodeGen/X86/x86-cmov-converter.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-pc-linux -x86-cmov-converter=true -verify-machineinstrs -disable-block-placement < %s | FileCheck -allow-deprecated-dag-overlap %s
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -101,11 +102,34 @@
 
 %struct.Node = type { i32, %struct.Node*, %struct.Node* }
 
-; CHECK-LABEL: CmovInHotPath
-; CHECK-NOT: cmov
-; CHECK: jg
-
 define void @CmovInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture readnone %d) #0 {
+; CHECK-LABEL: CmovInHotPath:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    testl %edi, %edi
+; CHECK-NEXT:    jle .LBB0_5
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    movl %edi, %r8d
+; CHECK-NEXT:    xorl %edi, %edi
+; CHECK-NEXT:  .LBB0_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%rcx,%rdi,4), %eax
+; CHECK-NEXT:    leal 1(%rax), %r9d
+; CHECK-NEXT:    imull %esi, %eax
+; CHECK-NEXT:    movl $10, %r10d
+; CHECK-NEXT:    cmpl %edx, %eax
+; CHECK-NEXT:    jg .LBB0_4
+; CHECK-NEXT:  # %bb.3: # %for.body
+; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT:    movl %r9d, %r10d
+; CHECK-NEXT:  .LBB0_4: # %for.body
+; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT:    imull %r9d, %r10d
+; CHECK-NEXT:    movl %r10d, (%rcx,%rdi,4)
+; CHECK-NEXT:    addq $1, %rdi
+; CHECK-NEXT:    cmpq %rdi, %r8
+; CHECK-NEXT:    jne .LBB0_2
+; CHECK-NEXT:  .LBB0_5: # %for.cond.cleanup
+; CHECK-NEXT:    retq
 entry:
   %cmp14 = icmp sgt i32 %n, 0
   br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
@@ -132,10 +156,33 @@
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-; CHECK-LABEL: CmovNotInHotPath
-; CHECK: cmovg
-
 define void @CmovNotInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture %d) #0 {
+; CHECK-LABEL: CmovNotInHotPath:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    testl %edi, %edi
+; CHECK-NEXT:    jle .LBB1_3
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    movl %edx, %r9d
+; CHECK-NEXT:    movl %edi, %r10d
+; CHECK-NEXT:    xorl %edi, %edi
+; CHECK-NEXT:    movl $10, %r11d
+; CHECK-NEXT:  .LBB1_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%rcx,%rdi,4), %eax
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    imull %esi, %edx
+; CHECK-NEXT:    cmpl %r9d, %edx
+; CHECK-NEXT:    cmovgl %r11d, %eax
+; CHECK-NEXT:    movl %eax, (%rcx,%rdi,4)
+; CHECK-NEXT:    movl (%r8,%rdi,4), %eax
+; CHECK-NEXT:    cltd
+; CHECK-NEXT:    idivl %r9d
+; CHECK-NEXT:    movl %eax, (%r8,%rdi,4)
+; CHECK-NEXT:    addq $1, %rdi
+; CHECK-NEXT:    cmpq %rdi, %r10
+; CHECK-NEXT:    jne .LBB1_2
+; CHECK-NEXT:  .LBB1_3: # %for.cond.cleanup
+; CHECK-NEXT:    retq
 entry:
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %for.body.preheader, label %for.cond.cleanup
@@ -164,11 +211,34 @@
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-; CHECK-LABEL: MaxIndex
-; CHECK-NOT: cmov
-; CHECK: jg
-
 define i32 @MaxIndex(i32 %n, i32* nocapture readonly %a) #0 {
+; CHECK-LABEL: MaxIndex:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpl $2, %edi
+; CHECK-NEXT:    jl .LBB2_5
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    movl %edi, %r8d
+; CHECK-NEXT:    xorl %edi, %edi
+; CHECK-NEXT:    movl $1, %edx
+; CHECK-NEXT:  .LBB2_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%rsi,%rdx,4), %r9d
+; CHECK-NEXT:    movslq %edi, %rcx
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    cmpl (%rsi,%rcx,4), %r9d
+; CHECK-NEXT:    jg .LBB2_4
+; CHECK-NEXT:  # %bb.3: # %for.body
+; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:  .LBB2_4: # %for.body
+; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT:    addq $1, %rdx
+; CHECK-NEXT:    movl %eax, %edi
+; CHECK-NEXT:    cmpq %rdx, %r8
+; CHECK-NEXT:    jne .LBB2_2
+; CHECK-NEXT:  .LBB2_5: # %for.cond.cleanup
+; CHECK-NEXT:    retq
 entry:
   %cmp14 = icmp sgt i32 %n, 1
   br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
@@ -197,11 +267,76 @@
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-; CHECK-LABEL: MaxValue
-; CHECK-NOT: jg
-; CHECK: cmovg
+; TODO: If cmov instruction is marked as unpredicatable, do not convert it to branch.
+define i32 @MaxIndex_unpredictable(i32 %n, i32* nocapture readonly %a) #0 {
+; CHECK-LABEL: MaxIndex_unpredictable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpl $2, %edi
+; CHECK-NEXT:    jl .LBB3_3
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movl $1, %edx
+; CHECK-NEXT:  .LBB3_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%rsi,%rdx,4), %edi
+; CHECK-NEXT:    cltq
+; CHECK-NEXT:    cmpl (%rsi,%rax,4), %edi
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    addq $1, %rdx
+; CHECK-NEXT:    cmpq %rdx, %rcx
+; CHECK-NEXT:    jne .LBB3_2
+; CHECK-NEXT:  .LBB3_3: # %for.cond.cleanup
+; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT:    retq
+entry:
+  %cmp14 = icmp sgt i32 %n, 1
+  br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %n to i64
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %entry
+  %t.0.lcssa = phi i32 [ 0, %entry ], [ %i.0.t.0, %for.body ]
+  ret i32 %t.0.lcssa
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
+  %t.015 = phi i32 [ %i.0.t.0, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %idxprom1 = sext i32 %t.015 to i64
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
+  %1 = load i32, i32* %arrayidx2, align 4
+  %cmp3 = icmp sgt i32 %0, %1
+  %2 = trunc i64 %indvars.iv to i32
+  %i.0.t.0 = select i1 %cmp3, i32 %2, i32 %t.015, !unpredictable !0
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
 
 define i32 @MaxValue(i32 %n, i32* nocapture readonly %a) #0 {
+; CHECK-LABEL: MaxValue:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl (%rsi), %eax
+; CHECK-NEXT:    cmpl $2, %edi
+; CHECK-NEXT:    jl .LBB4_3
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    movl $1, %edx
+; CHECK-NEXT:  .LBB4_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%rsi,%rdx,4), %edi
+; CHECK-NEXT:    cmpl %eax, %edi
+; CHECK-NEXT:    cmovgl %edi, %eax
+; CHECK-NEXT:    addq $1, %rdx
+; CHECK-NEXT:    cmpq %rdx, %rcx
+; CHECK-NEXT:    jne .LBB4_2
+; CHECK-NEXT:  .LBB4_3: # %for.cond.cleanup
+; CHECK-NEXT:    retq
 entry:
   %0 = load i32, i32* %a, align 4
   %cmp13 = icmp sgt i32 %n, 1
@@ -227,10 +362,25 @@
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-; CHECK-LABEL: BinarySearch
-; CHECK: set
-
 define i32 @BinarySearch(i32 %Mask, %struct.Node* nocapture readonly %Curr, %struct.Node* nocapture readonly %Next) #0 {
+; CHECK-LABEL: BinarySearch:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl (%rsi), %eax
+; CHECK-NEXT:    jmp .LBB5_2
+; CHECK-NEXT:  .LBB5_1: # %while.body
+; CHECK-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    btl %eax, %edi
+; CHECK-NEXT:    setae %cl
+; CHECK-NEXT:    movq 8(%rdx,%rcx,8), %rdx
+; CHECK-NEXT:  .LBB5_2: # %while.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%rdx), %ecx
+; CHECK-NEXT:    cmpl %ecx, %eax
+; CHECK-NEXT:    ja .LBB5_1
+; CHECK-NEXT:  # %bb.3: # %while.end
+; CHECK-NEXT:    retq
 entry:
   %Val8 = getelementptr inbounds %struct.Node, %struct.Node* %Curr, i64 0, i32 0
   %0 = load i32, i32* %Val8, align 8
@@ -287,20 +437,40 @@
 ;;                                          ; previous Phi instruction result
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-; CHECK-LABEL: Transform
-; CHECK-NOT: cmov
-; CHECK:         divl    [[a:%[0-9a-z]*]]
-; CHECK:         movl    $11, [[s1:%[0-9a-z]*]]
-; CHECK:         movl    [[a]], [[s2:%[0-9a-z]*]]
-; CHECK:         cmpl    [[a]], %edx
-; CHECK:         ja      [[SinkBB:.*]]
-; CHECK: [[FalseBB:.*]]:
-; CHECK:         movl    $22, [[s1]]
-; CHECK:         movl    $22, [[s2]]
-; CHECK: [[SinkBB]]:
-; CHECK:         ja
-
 define void @Transform(i32 *%arr, i32 *%arr2, i32 %a, i32 %b, i32 %c, i32 %n) #0 {
+; CHECK-LABEL: Transform:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB6_5
+; CHECK-NEXT:  # %bb.1: # %while.body.preheader
+; CHECK-NEXT:    movl %edx, %r8d
+; CHECK-NEXT:    xorl %esi, %esi
+; CHECK-NEXT:  .LBB6_2: # %while.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movslq %esi, %rsi
+; CHECK-NEXT:    movl (%rdi,%rsi,4), %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    divl %r8d
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    movl $11, %eax
+; CHECK-NEXT:    movl %r8d, %ecx
+; CHECK-NEXT:    cmpl %r8d, %edx
+; CHECK-NEXT:    ja .LBB6_4
+; CHECK-NEXT:  # %bb.3: # %while.body
+; CHECK-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; CHECK-NEXT:    movl $22, %eax
+; CHECK-NEXT:    movl $22, %ecx
+; CHECK-NEXT:  .LBB6_4: # %while.body
+; CHECK-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    divl %ecx
+; CHECK-NEXT:    movl %edx, (%rdi,%rsi,4)
+; CHECK-NEXT:    addl $1, %esi
+; CHECK-NEXT:    cmpl %r9d, %esi
+; CHECK-NEXT:    ja .LBB6_2
+; CHECK-NEXT:  .LBB6_5: # %while.end
+; CHECK-NEXT:    retq
 entry:
   %cmp10 = icmp ugt i32 0, %n
   br i1 %cmp10, label %while.body, label %while.end
@@ -328,16 +498,33 @@
 ; even outside of a loop.
 define i32 @test_cmov_memoperand(i32 %a, i32 %b, i32 %x, i32* %y) #0 {
 ; CHECK-LABEL: test_cmov_memoperand:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    ja .LBB7_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movl (%rcx), %eax
+; CHECK-NEXT:  .LBB7_2: # %entry
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         movl %edx, %eax
-; CHECK:         cmpl
   %load = load i32, i32* %y
   %z = select i1 %cond, i32 %x, i32 %load
-; CHECK-NOT:     cmov
-; CHECK:         ja [[FALSE_BB:.*]]
-; CHECK:         movl (%rcx), %eax
-; CHECK:       [[FALSE_BB]]:
+  ret i32 %z
+}
+
+; TODO: If cmov instruction is marked as unpredicatable, do not convert it to branch.
+define i32 @test_cmov_memoperand_unpredictable(i32 %a, i32 %b, i32 %x, i32* %y) #0 {
+; CHECK-LABEL: test_cmov_memoperand_unpredictable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    cmovbel (%rcx), %eax
+; CHECK-NEXT:    retq
+entry:
+  %cond = icmp ugt i32 %a, %b
+  %load = load i32, i32* %y
+  %z = select i1 %cond, i32 %x, i32 %load, !unpredictable !0
   ret i32 %z
 }
 
@@ -345,29 +532,25 @@
 ; operand.
 define i32 @test_cmov_memoperand_in_group(i32 %a, i32 %b, i32 %x, i32* %y.ptr) #0 {
 ; CHECK-LABEL: test_cmov_memoperand_in_group:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    movl %edx, %r8d
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    ja .LBB9_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movl (%rcx), %r8d
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    movl %esi, %edx
+; CHECK-NEXT:  .LBB9_2: # %entry
+; CHECK-NEXT:    addl %r8d, %eax
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         movl %edx, %eax
-; CHECK:         cmpl
   %y = load i32, i32* %y.ptr
   %z1 = select i1 %cond, i32 %x, i32 %a
   %z2 = select i1 %cond, i32 %x, i32 %y
   %z3 = select i1 %cond, i32 %x, i32 %b
-; CHECK-NOT:     cmov
-; CHECK:         ja [[FALSE_BB:.*]]
-; CHECK-DAG:     movl %{{.*}}, %[[R1:.*]]
-; CHECK-DAG:     movl (%r{{..}}), %[[R2:.*]]
-; CHECK-DAG:     movl %{{.*}} %eax
-; CHECK:       [[FALSE_BB]]:
-; CHECK:         addl
-; CHECK-DAG:       %[[R1]]
-; CHECK-DAG:       ,
-; CHECK-DAG:       %eax
-; CHECK-DAG:     addl
-; CHECK-DAG:       %[[R2]]
-; CHECK-DAG:       ,
-; CHECK-DAG:       %eax
-; CHECK:         retq
   %s1 = add i32 %z1, %z2
   %s2 = add i32 %s1, %z3
   ret i32 %s2
@@ -376,29 +559,25 @@
 ; Same as before but with operands reversed in the select with a load.
 define i32 @test_cmov_memoperand_in_group2(i32 %a, i32 %b, i32 %x, i32* %y.ptr) #0 {
 ; CHECK-LABEL: test_cmov_memoperand_in_group2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    movl %edx, %r8d
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    jbe .LBB10_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movl (%rcx), %r8d
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    movl %esi, %edx
+; CHECK-NEXT:  .LBB10_2: # %entry
+; CHECK-NEXT:    addl %r8d, %eax
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         movl %edx, %eax
-; CHECK:         cmpl
   %y = load i32, i32* %y.ptr
   %z2 = select i1 %cond, i32 %a, i32 %x
   %z1 = select i1 %cond, i32 %y, i32 %x
   %z3 = select i1 %cond, i32 %b, i32 %x
-; CHECK-NOT:     cmov
-; CHECK:         jbe [[FALSE_BB:.*]]
-; CHECK-DAG:     movl %{{.*}}, %[[R1:.*]]
-; CHECK-DAG:     movl (%r{{..}}), %[[R2:.*]]
-; CHECK-DAG:     movl %{{.*}} %eax
-; CHECK:       [[FALSE_BB]]:
-; CHECK:         addl
-; CHECK-DAG:       %[[R1]]
-; CHECK-DAG:       ,
-; CHECK-DAG:       %eax
-; CHECK-DAG:     addl
-; CHECK-DAG:       %[[R2]]
-; CHECK-DAG:       ,
-; CHECK-DAG:       %eax
-; CHECK:         retq
   %s1 = add i32 %z1, %z2
   %s2 = add i32 %s1, %z3
   ret i32 %s2
@@ -408,15 +587,19 @@
 ; loads.
 define i32 @test_cmov_memoperand_conflicting_dir(i32 %a, i32 %b, i32 %x, i32* %y1.ptr, i32* %y2.ptr) #0 {
 ; CHECK-LABEL: test_cmov_memoperand_conflicting_dir:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    movl (%rcx), %eax
+; CHECK-NEXT:    cmoval %edx, %eax
+; CHECK-NEXT:    cmoval (%r8), %edx
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         cmpl
   %y1 = load i32, i32* %y1.ptr
   %y2 = load i32, i32* %y2.ptr
   %z1 = select i1 %cond, i32 %x, i32 %y1
   %z2 = select i1 %cond, i32 %y2, i32 %x
-; CHECK:         cmoval
-; CHECK:         cmoval
   %s1 = add i32 %z1, %z2
   ret i32 %s1
 }
@@ -426,18 +609,19 @@
 ; the group.
 define i32 @test_cmov_memoperand_in_group_reuse_for_addr(i32 %a, i32 %b, i32* %x, i32* %y) #0 {
 ; CHECK-LABEL: test_cmov_memoperand_in_group_reuse_for_addr:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    ja .LBB12_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movl (%rcx), %eax
+; CHECK-NEXT:  .LBB12_2: # %entry
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         movl %edi, %eax
-; CHECK:         cmpl
   %p = select i1 %cond, i32* %x, i32* %y
   %load = load i32, i32* %p
   %z = select i1 %cond, i32 %a, i32 %load
-; CHECK-NOT:     cmov
-; CHECK:         ja [[FALSE_BB:.*]]
-; CHECK:         movl (%r{{..}}), %eax
-; CHECK:       [[FALSE_BB]]:
-; CHECK:         retq
   ret i32 %z
 }
 
@@ -445,20 +629,21 @@
 ; uses the result of the other as part of the address.
 define i32 @test_cmov_memoperand_in_group_reuse_for_addr2(i32 %a, i32 %b, i32* %x, i32** %y) #0 {
 ; CHECK-LABEL: test_cmov_memoperand_in_group_reuse_for_addr2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    ja .LBB13_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movq (%rcx), %rax
+; CHECK-NEXT:    movl (%rax), %eax
+; CHECK-NEXT:  .LBB13_2: # %entry
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         movl %edi, %eax
-; CHECK:         cmpl
   %load1 = load i32*, i32** %y
   %p = select i1 %cond, i32* %x, i32* %load1
   %load2 = load i32, i32* %p
   %z = select i1 %cond, i32 %a, i32 %load2
-; CHECK-NOT:     cmov
-; CHECK:         ja [[FALSE_BB:.*]]
-; CHECK:         movq (%r{{..}}), %[[R1:.*]]
-; CHECK:         movl (%[[R1]]), %eax
-; CHECK:       [[FALSE_BB]]:
-; CHECK:         retq
   ret i32 %z
 }
 
@@ -467,19 +652,20 @@
 ; where that cmov gets *its* input from a prior cmov in the group.
 define i32 @test_cmov_memoperand_in_group_reuse_for_addr3(i32 %a, i32 %b, i32* %x, i32* %y, i32* %z) #0 {
 ; CHECK-LABEL: test_cmov_memoperand_in_group_reuse_for_addr3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    ja .LBB14_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movl (%rcx), %eax
+; CHECK-NEXT:  .LBB14_2: # %entry
+; CHECK-NEXT:    retq
 entry:
   %cond = icmp ugt i32 %a, %b
-; CHECK:         movl %edi, %eax
-; CHECK:         cmpl
   %p = select i1 %cond, i32* %x, i32* %y
   %p2 = select i1 %cond, i32* %z, i32* %p
   %load = load i32, i32* %p2
   %r = select i1 %cond, i32 %a, i32 %load
-; CHECK-NOT:     cmov
-; CHECK:         ja [[FALSE_BB:.*]]
-; CHECK:         movl (%r{{..}}), %eax
-; CHECK:       [[FALSE_BB]]:
-; CHECK:         retq
   ret i32 %r
 }
 
@@ -495,34 +681,35 @@
 ; CHECK-NEXT:    movq (%rcx), %rdx
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    movq %rax, %rcx
-entry:
-  %begin = load i32*, i32** @begin, align 8
-  %end = load i32*, i32** @end, align 8
-  br label %loop.body
-
-; CHECK-NEXT:  .LBB13_1: # %loop.body
+; CHECK-NEXT:  .LBB15_1: # %loop.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    addq $8, %rcx
 ; CHECK-NEXT:    cmpq %rdx, %rcx
-; CHECK-NEXT:    ja .LBB13_3
+; CHECK-NEXT:    ja .LBB15_3
 ; CHECK-NEXT:  # %bb.2: # %loop.body
-; CHECK-NEXT:    # in Loop: Header=BB13_1 Depth=1
+; CHECK-NEXT:    # in Loop: Header=BB15_1 Depth=1
 ; CHECK-NEXT:    movq (%r8), %rcx
-; CHECK-NEXT:  .LBB13_3: # %loop.body
-; CHECK-NEXT:    # in Loop: Header=BB13_1 Depth=1
+; CHECK-NEXT:  .LBB15_3: # %loop.body
+; CHECK-NEXT:    # in Loop: Header=BB15_1 Depth=1
 ; CHECK-NEXT:    movl %edi, (%rcx)
 ; CHECK-NEXT:    addq $8, %rcx
 ; CHECK-NEXT:    cmpq %rdx, %rcx
-; CHECK-NEXT:    ja .LBB13_5
+; CHECK-NEXT:    ja .LBB15_5
 ; CHECK-NEXT:  # %bb.4: # %loop.body
-; CHECK-NEXT:    # in Loop: Header=BB13_1 Depth=1
+; CHECK-NEXT:    # in Loop: Header=BB15_1 Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
-; CHECK-NEXT:  .LBB13_5: # %loop.body
-; CHECK-NEXT:    # in Loop: Header=BB13_1 Depth=1
+; CHECK-NEXT:  .LBB15_5: # %loop.body
+; CHECK-NEXT:    # in Loop: Header=BB15_1 Depth=1
 ; CHECK-NEXT:    movl %edi, (%rcx)
 ; CHECK-NEXT:    addl $1, %esi
 ; CHECK-NEXT:    cmpl $1024, %esi # imm = 0x400
-; CHECK-NEXT:    jl .LBB13_1
+; CHECK-NEXT:    jl .LBB15_1
+; CHECK-NEXT:  # %bb.6: # %exit
+; CHECK-NEXT:    retq
+entry:
+  %begin = load i32*, i32** @begin, align 8
+  %end = load i32*, i32** @end, align 8
+  br label %loop.body
 loop.body:
   %phi.iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.body ]
   %phi.ptr = phi i32* [ %begin, %entry ], [ %dst2, %loop.body ]
@@ -538,11 +725,9 @@
   %iv.next = add i32 %phi.iv, 1
   %cond = icmp slt i32 %iv.next, 1024
   br i1 %cond, label %loop.body, label %exit
-
-; CHECK-NEXT:  # %bb.6: # %exit
-; CHECK-NEXT:    retq
 exit:
   ret void
 }
 
 attributes #0 = {"target-cpu"="x86-64"}
+!0 = !{}
Index: llvm/lib/Transforms/IPO/AlwaysInliner.cpp
===================================================================
--- llvm/lib/Transforms/IPO/AlwaysInliner.cpp
+++ llvm/lib/Transforms/IPO/AlwaysInliner.cpp
@@ -54,13 +54,13 @@
     if (F.isPresplitCoroutine())
       continue;
 
-    if (!F.isDeclaration() && F.hasFnAttribute(Attribute::AlwaysInline) &&
-        isInlineViable(F).isSuccess()) {
+    if (!F.isDeclaration() && isInlineViable(F).isSuccess()) {
       Calls.clear();
 
       for (User *U : F.users())
         if (auto *CB = dyn_cast<CallBase>(U))
-          if (CB->getCalledFunction() == &F)
+          if (CB->getCalledFunction() == &F &&
+              CB->hasFnAttr(Attribute::AlwaysInline))
             Calls.insert(CB);
 
       for (CallBase *CB : Calls) {
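
A note on the AlwaysInliner.cpp hunk above: the pass now filters call sites rather than callees. Since CallBase::hasFnAttr also consults the called function's attributes, functions marked alwaysinline keep being inlined at their direct call sites; the newly handled case is a call-site-only alwaysinline, as Clang emits for flatten. A minimal sketch of that predicate, assuming the LLVM C++ API at this revision (the helper name is made up):

  #include "llvm/IR/Attributes.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/InstrTypes.h"

  // A direct call to F is eligible when either the call site or F itself
  // carries alwaysinline; CallBase::hasFnAttr checks both.
  static bool isAlwaysInlineCallSite(const llvm::CallBase &CB,
                                     const llvm::Function &F) {
    return CB.getCalledFunction() == &F &&
           CB.hasFnAttr(llvm::Attribute::AlwaysInline);
  }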
Index: clang/test/CodeGenCXX/flatten.cpp
===================================================================
--- clang/test/CodeGenCXX/flatten.cpp
+++ clang/test/CodeGenCXX/flatten.cpp
@@ -1,7 +1,3 @@
-// UNSUPPORTED: experimental-new-pass-manager
-// See the comment for CodeGen/flatten.c on why this is unsupported with the new
-// PM.
-
 // RUN: %clang_cc1 -triple=x86_64-linux-gnu -std=c++11 %s -emit-llvm -o - | FileCheck %s
 
 void f(void) {}
Index: clang/test/CodeGen/flatten.c
===================================================================
--- clang/test/CodeGen/flatten.c
+++ clang/test/CodeGen/flatten.c
@@ -1,9 +1,3 @@
-// UNSUPPORTED: experimental-new-pass-manager
-// Currently, different code seems to be intentionally generated under the new
-// PM since we alwaysinline functions and not callsites under new PM.
-// Under new PM, f() will not be inlined from g() since f is not marked as
-// alwaysinline.
-
 // RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o - | FileCheck %s
 
 void f(void) {}