zturner created this revision. zturner added a reviewer: chaoren. zturner added a subscriber: lldb-commits.
New output looks like this: FAIL: LLDB (suite) :: TestThreadStepOut.py (Windows zturner-win81 8 6.2.9200 AMD64 Intel64 Family 6 Model 45 Stepping 7, GenuineIntel) FAIL: LLDB (suite) :: TestValueAPI.py (Windows zturner-win81 8 6.2.9200 AMD64 Intel64 Family 6 Model 45 Stepping 7, GenuineIntel) FAIL: LLDB (suite) :: TestValueMD5Crash.py (Windows zturner-win81 8 6.2.9200 AMD64 Intel64 Family 6 Model 45 Stepping 7, GenuineIntel) FAIL: LLDB (suite) :: TestVectorTypesFormatting.py (Windows zturner-win81 8 6.2.9200 AMD64 Intel64 Family 6 Model 45 Stepping 7, GenuineIntel) Unexpected Successes (1) UNEXPECTED SUCCESS: LLDB (suite) :: TestSettings.py (Windows zturner-win81 8 6.2.9200 AMD64 Intel64 Family 6 Model 45 Stepping 7, GenuineIntel) [50856 refs] ninja: build stopped: subcommand failed. It is not smart enough to dig into the individual file (TestSettings.py in this example) and find out which test passed that was not expected. That can be done as future work if anyone is interested. The important thing, though, is that unexpected successes currently have zero visibility, so it is hard to tell when a change fixes existing broken tests. The hope is that surfacing this will lead to more tests being re-enabled. http://reviews.llvm.org/D12020 Files: test/dosep.py
Index: test/dosep.py =================================================================== --- test/dosep.py +++ test/dosep.py @@ -121,6 +121,7 @@ def parse_test_results(output): passes = 0 failures = 0 + unexpected_successes = 0 for result in output: pass_count = re.search("^RESULT:.*([0-9]+) passes", result, re.MULTILINE) @@ -128,16 +129,20 @@ result, re.MULTILINE) error_count = re.search("^RESULT:.*([0-9]+) errors", result, re.MULTILINE) + unexpected_success_count = re.search("^RESULT:.*([0-9]+) unexpected successes", + result, re.MULTILINE) this_fail_count = 0 this_error_count = 0 if pass_count is not None: passes = passes + int(pass_count.group(1)) if fail_count is not None: failures = failures + int(fail_count.group(1)) + if unexpected_success_count is not None: + unexpected_successes = unexpected_successes + int(unexpected_success_count.group(1)) if error_count is not None: failures = failures + int(error_count.group(1)) pass - return passes, failures + return passes, failures, unexpected_successes def call_with_timeout(command, timeout, name): @@ -161,14 +166,14 @@ stderr=subprocess.PIPE) output = process.communicate() exit_status = process.returncode - passes, failures = parse_test_results(output) + passes, failures, unexpected_successes = parse_test_results(output) if exit_status == 0: # stdout does not have any useful information from 'dotest.py', # only stderr does. 
report_test_pass(name, output[1]) else: report_test_failure(name, command, output[1]) - return name, exit_status, passes, failures + return name, exit_status, passes, failures, unexpected_successes def process_dir(root, files, test_root, dotest_argv): @@ -187,17 +192,19 @@ results.append(call_with_timeout(command, timeout, name)) - # result = (name, status, passes, failures) - timed_out = [name for name, status, _, _ in results + # result = (name, status, passes, failures, unexpected_successes) + timed_out = [name for name, status, _, _, _ in results if status == eTimedOut] - passed = [name for name, status, _, _ in results + passed = [name for name, status, _, _, _ in results if status == ePassed] - failed = [name for name, status, _, _ in results + failed = [name for name, status, _, _, _ in results if status != ePassed] + xpasses = [name for name, status, _, _, unexpected_successes in results if unexpected_successes > 0] + pass_count = sum([result[2] for result in results]) fail_count = sum([result[3] for result in results]) - return (timed_out, passed, failed, pass_count, fail_count) + return (timed_out, passed, failed, xpasses, pass_count, fail_count) in_q = None out_q = None @@ -259,10 +266,11 @@ timed_out = sum([result[0] for result in test_results], []) passed = sum([result[1] for result in test_results], []) failed = sum([result[2] for result in test_results], []) - pass_count = sum([result[3] for result in test_results]) - fail_count = sum([result[4] for result in test_results]) + unexpected_successes = sum([result[3] for result in test_results], []) + pass_count = sum([result[4] for result in test_results]) + fail_count = sum([result[5] for result in test_results]) - return (timed_out, passed, failed, pass_count, fail_count) + return (timed_out, passed, failed, unexpected_successes, pass_count, fail_count) def getExpectedTimeouts(platform_name): @@ -433,7 +441,7 @@ num_threads = 1 system_info = " ".join(platform.uname()) - (timed_out, passed, failed, 
pass_count, fail_count) = walk_and_invoke( + (timed_out, passed, failed, unexpected_successes, pass_count, fail_count) = walk_and_invoke( test_directory, test_subdir, dotest_argv, num_threads) timed_out = set(timed_out) @@ -474,6 +482,8 @@ sys.stdout.write(" (%d failed) (%f%%)" % ( fail_count, 100.0 * fail_count / num_test_cases)) print + exit_code = 0 + if len(failed) > 0: failed.sort() print "Failing Tests (%d)" % len(failed) @@ -481,8 +491,15 @@ print "%s: LLDB (suite) :: %s (%s)" % ( "TIMEOUT" if f in timed_out else "FAIL", f, system_info ) - sys.exit(1) - sys.exit(0) + exit_code = 1 + + if len(unexpected_successes) > 0: + unexpected_successes.sort() + print "\nUnexpected Successes (%d)" % len(unexpected_successes) + for u in unexpected_successes: + print "UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" % (u, system_info) + + sys.exit(exit_code) if __name__ == '__main__': main()
_______________________________________________ lldb-commits mailing list lldb-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits