Diff
Modified: trunk/Tools/ChangeLog (113801 => 113802)
--- trunk/Tools/ChangeLog 2012-04-11 01:30:24 UTC (rev 113801)
+++ trunk/Tools/ChangeLog 2012-04-11 01:37:47 UTC (rev 113802)
@@ -1,5 +1,59 @@
2012-04-10  Dirk Pranke  <dpranke@chromium.org>
+ webkit-patch: add a print-expectations command
+ https://bugs.webkit.org/show_bug.cgi?id=83347
+
+ Reviewed by Adam Barth.
+
+ This command will print the expected results for a given set of
+ tests on a given set of ports matching a given set of keywords.
+
+ Example output (printing all the tests not expected to pass, and their
+ expected results, on a Mac):
+
+ $ webkit-patch print-expectations -x pass fast/html
+ // For mac-snowleopard
+ fast/html/details-open4.html = TEXT
+ fast/html/details-open2.html = TEXT
+ fast/html/details-no-summary4.html = TEXT
+ fast/html/details-open-javascript.html = TEXT
+ $
+
+ You can also print full test-expectatons.txt lines using --full and a
+ CSV-style report (which can be useful for post-processing) using --csv.
+
+ It will replace the 'skipped-files' command (which will be
+ removed in a subsequent patch) and is a more general (and
+ cleaner and properly layered) solution.
+
+ Also add an update() command to MockOptions() to make overriding keywords easier.
+
+ * Scripts/webkitpy/layout_tests/models/test_expectations.py:
+ (TestExpectationSerializer.to_string):
+ (TestExpectationSerializer):
+ (TestExpectationSerializer.to_csv):
+ (TestExpectationSerializer._format_result):
+ (TestExpectationLine.create_passing_expectation):
+ (TestExpectationsModel.get_test_set_for_keyword):
+ (TestExpectationsModel.has_keyword):
+ (TestExpectations.model):
+ * Scripts/webkitpy/tool/commands/queries.py:
+ (execute):
+ (PrintExpectations):
+ (PrintExpectations.__init__):
+ (PrintExpectations.execute):
+ (PrintExpectations._filter_tests):
+ (PrintExpectations._format_lines):
+ (PrintExpectations._model):
+ * Scripts/webkitpy/tool/commands/queries_unittest.py:
+ (TestPrintExpectations): Added.
+ * Scripts/webkitpy/tool/mocktool.py:
+ (MockOptions):
+ (MockOptions.__init__):
+ (MockOptions.update): Added.
+
+2012-04-10  Dirk Pranke  <dpranke@chromium.org>
+
webkitpy: refactor handling of --platform and related options
https://bugs.webkit.org/show_bug.cgi?id=83525
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py (113801 => 113802)
--- trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py 2012-04-11 01:30:24 UTC (rev 113801)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py 2012-04-11 01:37:47 UTC (rev 113802)
@@ -119,7 +119,7 @@
self._test_configuration_converter = test_configuration_converter
self._parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
- def to_string(self, expectation_line):
+ def to_string(self, expectation_line, include_modifiers=True, include_expectations=True, include_comment=True):
if expectation_line.is_invalid():
return expectation_line.original_string or ''
@@ -135,8 +135,16 @@
result.append(self._format_result(modifiers, expectation_line.name, expectations, expectation_line.comment))
return "\n".join(result) if result else None
- return self._format_result(" ".join(expectation_line.modifiers), expectation_line.name, " ".join(expectation_line.expectations), expectation_line.comment)
+ return self._format_result(" ".join(expectation_line.modifiers),
+ expectation_line.name,
+ " ".join(expectation_line.expectations),
+ expectation_line.comment,
+ include_modifiers, include_expectations, include_comment)
+ def to_csv(self, expectation_line):
+ # Note that this doesn't include the comments.
+ return '%s,%s,%s' % (expectation_line.name, ' '.join(expectation_line.modifiers), ' '.join(expectation_line.expectations))
+
def _parsed_expectations_string(self, expectation_line):
result = []
for index in TestExpectations.EXPECTATION_ORDER:
@@ -154,9 +162,14 @@
return ' '.join(result)
@classmethod
- def _format_result(cls, modifiers, name, expectations, comment):
- result = "%s : %s = %s" % (modifiers.upper(), name, expectations.upper())
- if comment is not None:
+ def _format_result(cls, modifiers, name, expectations, comment, include_modifiers=True, include_expectations=True, include_comment=True):
+ result = ''
+ if include_modifiers:
+ result += '%s : ' % modifiers.upper()
+ result += name
+ if include_expectations:
+ result += ' = %s' % expectations.upper()
+ if include_comment and comment is not None:
result += " //%s" % comment
return result
@@ -395,6 +408,7 @@
expectation_line.name = test
expectation_line.path = test
expectation_line.parsed_expectations = set([PASS])
+ expectation_line.expectations = set(['PASS'])
expectation_line.matching_tests = [test]
return expectation_line
@@ -445,6 +459,24 @@
return tests
+ def get_test_set_for_keyword(self, keyword):
+ # FIXME: get_test_set() is an awkward public interface because it requires
+ # callers to know the difference between modifiers and expectations. We
+ # should replace that with this where possible.
+ expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
+ if expectation_enum is not None:
+ return self._expectation_to_tests[expectation_enum]
+ modifier_enum = TestExpectations.MODIFIERS.get(keyword.lower(), None)
+ if modifier_enum is not None:
+ return self._modifier_to_tests[modifier_enum]
+
+ # We must not have an index on this modifier.
+ matching_tests = set()
+ for test, modifiers in self._test_to_modifiers.iteritems():
+ if keyword.lower() in modifiers:
+ matching_tests.add(test)
+ return matching_tests
+
def get_tests_with_result_type(self, result_type):
return self._result_type_to_tests[result_type]
@@ -458,6 +490,10 @@
def has_modifier(self, test, modifier):
return test in self._modifier_to_tests[modifier]
+ def has_keyword(self, test, keyword):
+ return (keyword.upper() in self.get_expectations_string(test) or
+ keyword.lower() in self.get_modifiers(test))
+
def has_test(self, test):
return test in self._test_to_expectation_line
@@ -729,6 +765,8 @@
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
+ def model(self):
+ return self._model
def get_rebaselining_failures(self):
return (self._model.get_test_set(REBASELINE, FAIL) |
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/queries.py (113801 => 113802)
--- trunk/Tools/Scripts/webkitpy/tool/commands/queries.py 2012-04-11 01:30:24 UTC (rev 113801)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/queries.py 2012-04-11 01:37:47 UTC (rev 113802)
@@ -44,6 +44,7 @@
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
from webkitpy.common.system.deprecated_logging import log
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationSerializer
from webkitpy.layout_tests.port import port_options
@@ -401,6 +402,92 @@
print "Test %r is not skipped by any port." % test_name
+class PrintExpectations(AbstractDeclarativeCommand):
+ name = 'print-expectations'
+ help_text = 'Print the expected result for the given test(s) on the given port(s)'
+
+ def __init__(self):
+ options = [
+ make_option('--all', action='store_true', default=False,
+ help='display the expectations for *all* tests'),
+ make_option('-x', '--exclude-keyword', action='append', default=[],
+ help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'),
+ make_option('-i', '--include-keyword', action='append', default=[],
+ help='limit to tests with the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'),
+ make_option('--csv', action='store_true', default=False,
+ help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
+ make_option('-f', '--full', action='store_true', default=False,
+ help='Print a full test_expectations.txt-style line for every match'),
+ ] + port_options(platform='port/platform to use. Use glob-style wildcards for multiple ports (implies --csv)')
+
+ AbstractDeclarativeCommand.__init__(self, options=options)
+ self._expectation_models = {}
+
+ def execute(self, options, args, tool):
+ if not args and not options.all:
+ print "You must either specify one or more test paths or --all."
+ return
+
+ default_port = tool.port_factory.get(options=options)
+ if options.platform:
+ port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+ if not port_names:
+ default_port = tool.port_factory.get(options.platform)
+ if default_port:
+ port_names = [default_port.name()]
+ else:
+ print "No port names match '%s'" % options.platform
+ return
+ else:
+ port_names = [default_port.name()]
+
+ serializer = TestExpectationSerializer()
+ tests = default_port.tests(args)
+ for port_name in port_names:
+ model = self._model(options, port_name, tests)
+ tests_to_print = self._filter_tests(options, model, tests)
+ lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
+ print '\n'.join(self._format_lines(options, port_name, serializer, lines))
+
+ def _filter_tests(self, options, model, tests):
+ filtered_tests = set()
+ if options.include_keyword:
+ for keyword in options.include_keyword:
+ filtered_tests.update(model.get_test_set_for_keyword(keyword))
+ else:
+ filtered_tests = tests
+
+ for keyword in options.exclude_keyword:
+ filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
+ return filtered_tests
+
+ def _format_lines(self, options, port_name, serializer, lines):
+ output = []
+ if options.csv:
+ for line in lines:
+ output.append("%s,%s" % (port_name, serializer.to_csv(line)))
+ elif lines:
+ include_modifiers = options.full
+ include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
+ output.append("// For %s" % port_name)
+ for line in lines:
+ output.append("%s" % serializer.to_string(line, include_modifiers, include_expectations, include_comment=False))
+ return output
+
+ def _model(self, options, port_name, tests):
+ port = self._tool.port_factory.get(port_name, options)
+ expectations_path = port.path_to_test_expectations_file()
+ if not expectations_path in self._expectation_models:
+ lint_mode = False
+ self._expectation_models[expectations_path] = TestExpectations(port, tests,
+ port.test_expectations(),
+ port.test_configuration(),
+ lint_mode,
+ port.test_expectations_overrides(),
+ port.skipped_tests(tests)).model()
+ return self._expectation_models[expectations_path]
+
+
class PrintBaselines(AbstractDeclarativeCommand):
name = 'print-baselines'
help_text = 'Prints the baseline locations for given test(s) on the given port(s)'
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py (113801 => 113802)
--- trunk/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py 2012-04-11 01:30:24 UTC (rev 113801)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py 2012-04-11 01:37:47 UTC (rev 113802)
@@ -28,6 +28,7 @@
import unittest
+from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.bugzilla import Bugzilla
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
@@ -118,6 +119,54 @@
self.assertEquals(command._blame_line_for_revision(None), "FAILED to fetch CommitInfo for rNone, exception: MESSAGE")
+class PrintExpectationsTest(unittest.TestCase):
+ def run_test(self, tests, expected_stdout, **args):
+ options = MockOptions(all=False, csv=False, full=False, platform='test-win-xp',
+ include_keyword=[], exclude_keyword=[]).update(**args)
+ tool = MockTool()
+ command = PrintExpectations()
+ command.bind_to_tool(tool)
+
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ command.execute(options, tests, tool)
+ finally:
+ stdout, _, _ = oc.restore_output()
+ self.assertEquals(stdout, expected_stdout)
+
+ def test_basic(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/image.html = IMAGE\n'
+ 'failures/expected/text.html = TEXT\n'))
+
+ def test_full(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'WONTFIX : failures/expected/image.html = IMAGE\n'
+ 'WONTFIX : failures/expected/text.html = TEXT\n'),
+ full=True)
+
+ def test_exclude(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/text.html = TEXT\n'),
+ exclude_keyword=['image'])
+
+ def test_include(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/image.html\n'),
+ include_keyword=['image'])
+
+ def test_csv(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('test-win-xp,failures/expected/image.html,wontfix,image\n'
+ 'test-win-xp,failures/expected/text.html,wontfix,text\n'),
+ csv=True)
+
+
class PrintBaselinesTest(unittest.TestCase):
def setUp(self):
self.oc = None
Modified: trunk/Tools/Scripts/webkitpy/tool/mocktool.py (113801 => 113802)
--- trunk/Tools/Scripts/webkitpy/tool/mocktool.py 2012-04-11 01:30:24 UTC (rev 113801)
+++ trunk/Tools/Scripts/webkitpy/tool/mocktool.py 2012-04-11 01:37:47 UTC (rev 113802)
@@ -47,10 +47,13 @@
# object will be used. Generally speaking unit tests should
# subclass this or provider wrapper functions that set a common
# set of options.
- for key, value in kwargs.items():
- self.__dict__[key] = value
+ self.update(**kwargs)
+ def update(self, **kwargs):
+ self.__dict__.update(**kwargs)
+ return self
+
# FIXME: This should be renamed MockWebKitPatch.
class MockTool(MockHost):
def __init__(self, *args, **kwargs):