From: Alexis Lothoré <alexis.loth...@bootlin.com>

Some regression reports show a lot of "PASSED->None" transitions. When such
a large number of identical transitions is observed, it could be that tests
are now failing, but it could also be that some tests have been renamed.

To detect such cases, add a log entry to the regression report giving the
number of new tests (i.e. tests that are present in the target results but
not in the base results). This new log also allows one to know about newly
added test bases.

Signed-off-by: Alexis Lothoré <alexis.loth...@bootlin.com>
Signed-off-by: Alexandre Belloni <alexandre.bell...@bootlin.com>
(cherry picked from commit 01b5cefd07e01c7407bc663842b8a8d502358a6d)
Signed-off-by: Steve Sakoman <st...@sakoman.com>
---
 scripts/lib/resulttool/regression.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 1facbcd85e..f80a9182a9 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -178,6 +178,8 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
     base_result = base_result.get('result')
     target_result = target_result.get('result')
     result = {}
+    new_tests = 0
+
     if base_result and target_result:
         for k in base_result:
             base_testcase = base_result[k]
@@ -189,6 +191,13 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
                     result[k] = {'base': base_status, 'target': target_status}
             else:
                 logger.error('Failed to retrieved base test case status: %s' % k)
+
+        # Also count new tests that were not present in base results: it
+        # could be newly added tests, but it could also highlights some tests
+        # renames or fixed faulty ptests
+        for k in target_result:
+            if k not in base_result:
+                new_tests += 1
     if result:
         new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
         # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
@@ -200,10 +209,13 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
             if new_pass_count > 0:
                 resultstring += f'    Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
         else:
-            resultstring = "Improvement: %s\n             %s\n                 (+%d test(s) passing)" % (base_name, target_name, new_pass_count)
+            resultstring = "Improvement: %s\n             %s\n                 (+%d test(s) passing)\n" % (base_name, target_name, new_pass_count)
             result = None
     else:
-        resultstring = "Match:       %s\n             %s" % (base_name, target_name)
+        resultstring = "Match:       %s\n             %s\n" % (base_name, target_name)
+
+    if new_tests > 0:
+        resultstring += f'    Additionally, {new_tests} new test(s) is/are present\n'
     return result, resultstring

 def get_results(logger, source):
-- 
2.34.1
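For illustration, here is a minimal standalone sketch (not part of the patch)
of the new-test counting introduced above, assuming simplified result
dictionaries; the helper name count_new_tests and the ptest names are made up
for this example:

# count_new_tests() and the dictionaries below are hypothetical, for
# illustration only; they are not part of resulttool.
def count_new_tests(base_result, target_result):
    """Return how many test cases exist only in target_result."""
    return sum(1 for name in target_result if name not in base_result)

if __name__ == "__main__":
    base = {"ptest.busybox.old-name": {"status": "PASSED"}}
    target = {
        "ptest.busybox.new-name": {"status": "PASSED"},
        "ptest.busybox.extra-test": {"status": "PASSED"},
    }
    new_tests = count_new_tests(base, target)
    if new_tests > 0:
        # Mirrors the wording the patch appends to the regression report
        print(f"    Additionally, {new_tests} new test(s) is/are present")

Running this prints "    Additionally, 2 new test(s) is/are present", which
corresponds to the extra line the patch appends to resultstring whenever the
target results contain tests unknown to the base results.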