Changes in directory llvm/test/CodeGen/X86:

2004-02-14-InefficientStackPointer.llx updated: 1.2 -> 1.3
fp-stack-ret.ll updated: 1.2 -> 1.3
fp_constant_op.llx updated: 1.4 -> 1.5
fp_load_fold.llx updated: 1.4 -> 1.5
sse-fcopysign.ll updated: 1.5 -> 1.6
sse-load-ret.ll updated: 1.2 -> 1.3
vec_call.ll updated: 1.3 -> 1.4
---
Log message:

For PR1336: http://llvm.org/PR1336 :
XFAIL tests covered by the PR. These will be un-XFAILed as they are fixed.

---
Diffs of the changes:  (+13 -6)

 2004-02-14-InefficientStackPointer.llx |    1 +
 fp-stack-ret.ll                        |    6 +++---
 fp_constant_op.llx                     |    1 +
 fp_load_fold.llx                       |    1 +
 sse-fcopysign.ll                       |    1 +
 sse-load-ret.ll                        |    8 +++++---
 vec_call.ll                            |    1 +
 7 files changed, 13 insertions(+), 6 deletions(-)


Index: llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
diff -u llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx:1.2 llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx:1.3
--- llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx:1.2	Fri Dec 1 22:23:08 2006
+++ llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx	Sun Apr 15 18:00:46 2007
@@ -1,4 +1,5 @@
 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep ESP | not grep sub
+; XFAIL: *
 int %test(int %X) {
	ret int %X


Index: llvm/test/CodeGen/X86/fp-stack-ret.ll
diff -u llvm/test/CodeGen/X86/fp-stack-ret.ll:1.2 llvm/test/CodeGen/X86/fp-stack-ret.ll:1.3
--- llvm/test/CodeGen/X86/fp-stack-ret.ll:1.2	Sun Feb 25 03:30:03 2007
+++ llvm/test/CodeGen/X86/fp-stack-ret.ll	Sun Apr 15 18:00:46 2007
@@ -1,7 +1,7 @@
-; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin8 -mcpu=yonah -march=x86 > %t &&
-; RUN: grep fldl %t | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin8 -mcpu=yonah -march=x86 > %t
+; RUN: grep fldl %t | wc -l | grep 1
 ; RUN: not grep xmm %t &&
-; RUN: grep 'sub.*esp' %t | wc -l | grep 1
+; RUN: grep {sub.*esp} %t | wc -l | grep 1
 
 ; These testcases shouldn't require loading into an XMM register then storing
 ; to memory, then reloading into an FPStack reg.


Index: llvm/test/CodeGen/X86/fp_constant_op.llx
diff -u llvm/test/CodeGen/X86/fp_constant_op.llx:1.4 llvm/test/CodeGen/X86/fp_constant_op.llx:1.5
--- llvm/test/CodeGen/X86/fp_constant_op.llx:1.4	Sun Apr 15 17:16:46 2007
+++ llvm/test/CodeGen/X86/fp_constant_op.llx	Sun Apr 15 18:00:46 2007
@@ -1,5 +1,6 @@
 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN: grep ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
+; XFAIL: *
 
 ; Test that the load of the constant is folded into the operation.


Index: llvm/test/CodeGen/X86/fp_load_fold.llx
diff -u llvm/test/CodeGen/X86/fp_load_fold.llx:1.4 llvm/test/CodeGen/X86/fp_load_fold.llx:1.5
--- llvm/test/CodeGen/X86/fp_load_fold.llx:1.4	Sun Apr 15 17:16:46 2007
+++ llvm/test/CodeGen/X86/fp_load_fold.llx	Sun Apr 15 18:00:46 2007
@@ -1,5 +1,6 @@
 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN: grep ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
+; XFAIL: *
 
 ; Test that the load of the memory location is folded into the operation.

Index: llvm/test/CodeGen/X86/sse-fcopysign.ll
diff -u llvm/test/CodeGen/X86/sse-fcopysign.ll:1.5 llvm/test/CodeGen/X86/sse-fcopysign.ll:1.6
--- llvm/test/CodeGen/X86/sse-fcopysign.ll:1.5	Sun Apr 15 17:16:46 2007
+++ llvm/test/CodeGen/X86/sse-fcopysign.ll	Sun Apr 15 18:00:46 2007
@@ -1,4 +1,5 @@
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | not grep test
+; XFAIL: *
 
 define float @test1(float %a, float %b) {
	%tmp = tail call float @copysignf( float %b, float %a )


Index: llvm/test/CodeGen/X86/sse-load-ret.ll
diff -u llvm/test/CodeGen/X86/sse-load-ret.ll:1.2 llvm/test/CodeGen/X86/sse-load-ret.ll:1.3
--- llvm/test/CodeGen/X86/sse-load-ret.ll:1.2	Fri Dec 1 22:23:08 2006
+++ llvm/test/CodeGen/X86/sse-load-ret.ll	Sun Apr 15 18:00:46 2007
@@ -1,6 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -enable-x86-sse &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep movss
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep xmm
+; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep movss
+; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep xmm
+; XFAIL: *
 
 double %test1(double *%P) {
	%X = load double* %P


Index: llvm/test/CodeGen/X86/vec_call.ll
diff -u llvm/test/CodeGen/X86/vec_call.ll:1.3 llvm/test/CodeGen/X86/vec_call.ll:1.4
--- llvm/test/CodeGen/X86/vec_call.ll:1.3	Sun Apr 15 17:16:46 2007
+++ llvm/test/CodeGen/X86/vec_call.ll	Sun Apr 15 18:00:46 2007
@@ -2,6 +2,7 @@
 ; RUN: grep {subl.*60}
 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | \
 ; RUN: grep {movdqa.*32}
+; XFAIL: *
 
 void %test() {
	tail call void %xx( int 1, int 2, int 3, int 4, int 5, int 6, int 7, <2 x long> cast (<4 x int> < int 4, int 3, int 2, int 1 > to <2 x long>), <2 x long> cast (<4 x int> < int 8, int 7, int 6, int 5 > to <2 x long>), <2 x long> cast (<4 x int> < int 6, int 4, int 2, int 0 > to <2 x long>), <2 x long> cast (<4 x int> < int 8, int 4, int 2, int 1 > to <2 x long>), <2 x long> cast (<4 x int> < int 0, int 1, int 3, int 9 > to <2 x long>) )
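
As background on the directive being added above: an "XFAIL: *" line tells the test harness that the test is expected to fail on every target, so failures are reported as XFAIL rather than as regressions until the line is removed. A minimal sketch of the resulting test shape follows; the file contents, function name, and grep pattern here are illustrative only and are not part of this commit:

  ; RUN: llvm-as < %s | llc -march=x86 | not grep {example.*pattern}
  ; Expected to fail on all targets; remove the XFAIL line once the
  ; underlying codegen issue is fixed.
  ; XFAIL: *

  define i32 @example(i32 %x) {
	ret i32 %x
  }

The harness still runs an XFAIL'd test; if it starts passing again it is reported as an unexpected pass (XPASS), which is the cue to drop the marker, matching the "un-XFAILed as they are fixed" plan in the log message.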