Commit 723c8258 authored by Rae Moar, committed by Shuah Khan

kunit: tool: Add command line interface to filter and report attributes

Add the ability for kunit.py to filter tests by attribute and to report a
list of tests, including their attributes, without running them.

Add flag "--filter" to input filters on test attributes. Tests will be
filtered out if they do not match all inputted filters.

Example: --filter speed=slow (This filter would run only the tests that are
marked as slow)

Filters support the operations <, >, <=, >=, !=, and =. Note that the
characters < and > are often interpreted by the shell, so they may need to
be quoted or escaped.

Example: --filter "speed>slow" or --filter speed\>slow (This filter would
run only the tests that have the speed faster than slow.

Additionally, multiple filters can be used.

Example: --filter "speed=slow, module!=example" (This filter would run
only the tests that have the speed slow and are not in the "example"
module)

Note: if the user wants filtered tests to be skipped, rather than not run
or shown at all, they should use the "--filter_action=skip" flag.
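
As a rough illustration only (this is not code from this patch; the actual
filter matching happens in the in-kernel executor, which receives the string
via the kunit.filter= parameter), a filter string of this shape could be
split into (attribute, operation, value) clauses along these lines, where
split_filters and FILTER_RE are hypothetical names:

  import re
  from typing import List, Tuple

  # Hypothetical sketch: break "speed=slow, module!=example" into clauses.
  FILTER_RE = re.compile(r'^\s*(\w+)\s*(<=|>=|!=|<|>|=)\s*(\S+)\s*$')

  def split_filters(filter_str: str) -> List[Tuple[str, str, str]]:
      clauses = []
      for part in filter_str.split(','):
          if not part.strip():
              continue
          match = FILTER_RE.match(part)
          if not match:
              raise ValueError('invalid attribute filter: ' + part.strip())
          clauses.append((match.group(1), match.group(2), match.group(3)))
      return clauses

  print(split_filters('speed=slow, module!=example'))
  # -> [('speed', '=', 'slow'), ('module', '!=', 'example')]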

Expose the output of the kunit.action=list option with the "--list_tests"
flag to output a list of tests. Additionally, add the "--list_tests_attr"
flag to output a list of tests and their attributes. These flags are useful
for seeing tests and test attributes without needing to run the tests.

Example of the output of "--list_tests_attr":
  example
  example.test_1
  example.test_2
  # example.test_2.speed: slow

This output includes a suite, "example", with two test cases, test_1 and
test_2. In this instance, test_2 has been marked as slow.
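
The attribute lines follow the pattern "# <test>.<attribute>: <value>".
Purely as a sketch of how that output could be consumed (collect_attributes
is a hypothetical helper, not part of kunit.py), the lines can be gathered
into a per-test dictionary:

  from collections import defaultdict
  from typing import Dict, Iterable

  # Hypothetical sketch: collect "# <test>.<attribute>: <value>" lines.
  def collect_attributes(lines: Iterable[str]) -> Dict[str, Dict[str, str]]:
      attrs: Dict[str, Dict[str, str]] = defaultdict(dict)
      for line in lines:
          line = line.strip()
          if not line.startswith('# '):
              continue  # plain suite/test name lines carry no attribute
          key, _, value = line[2:].partition(':')
          test, _, attribute = key.rpartition('.')
          attrs[test][attribute] = value.strip()
      return dict(attrs)

  print(collect_attributes(['example', 'example.test_1', 'example.test_2',
                            '# example.test_2.speed: slow']))
  # -> {'example.test_2': {'speed': 'slow'}}
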
Reviewed-by: David Gow <davidgow@google.com>
Signed-off-by: Rae Moar <rmoar@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
parent 529534e8

tools/testing/kunit/kunit.py
@@ -55,8 +55,12 @@ class KunitExecRequest(KunitParseRequest):
build_dir: str
timeout: int
filter_glob: str
filter: str
filter_action: Optional[str]
kernel_args: Optional[List[str]]
run_isolated: Optional[str]
list_tests: bool
list_tests_attr: bool
@dataclass
class KunitRequest(KunitExecRequest, KunitBuildRequest):
@@ -102,19 +106,41 @@ def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
args = ['kunit.action=list']
if request.kernel_args:
args.extend(request.kernel_args)
output = linux.run_kernel(args=args,
timeout=request.timeout,
filter_glob=request.filter_glob,
filter=request.filter,
filter_action=request.filter_action,
build_dir=request.build_dir)
lines = kunit_parser.extract_tap_lines(output)
# Hack! Drop the dummy TAP version header that the executor prints out.
lines.pop()
# Filter out any extraneous non-test output that might have gotten mixed in.
return [l for l in output if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]:
args = ['kunit.action=list_attr']
if request.kernel_args:
args.extend(request.kernel_args)
output = linux.run_kernel(args=args,
timeout=request.timeout,
filter_glob=request.filter_glob,
filter=request.filter,
filter_action=request.filter_action,
build_dir=request.build_dir)
lines = kunit_parser.extract_tap_lines(output)
# Hack! Drop the dummy TAP version header that the executor prints out.
lines.pop()
return lines
def _suites_from_test_list(tests: List[str]) -> List[str]:
"""Extracts all the suites from an ordered list of tests."""
@@ -128,10 +154,18 @@ def _suites_from_test_list(tests: List[str]) -> List[str]:
suites.append(suite)
return suites
def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
filter_globs = [request.filter_glob]
if request.list_tests:
output = _list_tests(linux, request)
for line in output:
print(line.rstrip())
return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
if request.list_tests_attr:
attr_output = _list_tests_attr(linux, request)
for line in attr_output:
print(line.rstrip())
return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
if request.run_isolated:
tests = _list_tests(linux, request)
if request.run_isolated == 'test':
@@ -155,6 +189,8 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
args=request.kernel_args,
timeout=request.timeout,
filter_glob=filter_glob,
filter=request.filter,
filter_action=request.filter_action,
build_dir=request.build_dir)
_, test_result = parse_tests(request, metadata, run_result)
@@ -341,6 +377,16 @@ def add_exec_opts(parser: argparse.ArgumentParser) -> None:
nargs='?',
default='',
metavar='filter_glob')
parser.add_argument('--filter',
help='Filter KUnit tests with attributes, '
'e.g. module=example or speed>slow',
type=str,
default='')
parser.add_argument('--filter_action',
help='If set to skip, filtered tests will be skipped, '
'e.g. --filter_action=skip. Otherwise they will not run.',
type=str,
choices=['skip'])
parser.add_argument('--kernel_args',
help='Kernel command-line parameters. Maybe be repeated',
action='append', metavar='')
@@ -350,6 +396,12 @@ def add_exec_opts(parser: argparse.ArgumentParser) -> None:
'what ran before it.',
type=str,
choices=['suite', 'test'])
parser.add_argument('--list_tests', help='If set, list all tests that will be '
'run.',
action='store_true')
parser.add_argument('--list_tests_attr', help='If set, list all tests and test '
'attributes.',
action='store_true')
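
Illustrative only (assumes it is run from tools/testing/kunit): the flags
added above can be driven through the existing kunit.main() entry point, in
the same way kunit_tool_test.py below exercises the tool.

  # Illustrative only: list slow tests and their attributes via the new flags;
  # this still configures and builds the kernel as usual before listing.
  import kunit
  kunit.main(['run', '--filter', 'speed=slow', '--list_tests_attr'])
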
def add_parse_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
@@ -398,8 +450,12 @@ def run_handler(cli_args: argparse.Namespace) -> None:
json=cli_args.json,
timeout=cli_args.timeout,
filter_glob=cli_args.filter_glob,
filter=cli_args.filter,
filter_action=cli_args.filter_action,
kernel_args=cli_args.kernel_args,
run_isolated=cli_args.run_isolated)
run_isolated=cli_args.run_isolated,
list_tests=cli_args.list_tests,
list_tests_attr=cli_args.list_tests_attr)
result = run_tests(linux, request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
@@ -441,8 +497,12 @@ def exec_handler(cli_args: argparse.Namespace) -> None:
json=cli_args.json,
timeout=cli_args.timeout,
filter_glob=cli_args.filter_glob,
filter=cli_args.filter,
filter_action=cli_args.filter_action,
kernel_args=cli_args.kernel_args,
run_isolated=cli_args.run_isolated)
run_isolated=cli_args.run_isolated,
list_tests=cli_args.list_tests,
list_tests_attr=cli_args.list_tests_attr)
result = exec_tests(linux, exec_request)
stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (result.elapsed_time))
tools/testing/kunit/kunit_kernel.py
@@ -330,11 +330,15 @@ class LinuxSourceTree:
return False
return self.validate_config(build_dir)
def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', timeout: Optional[int]=None) -> Iterator[str]:
def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
if not args:
args = []
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
args.append('kunit.filter_glob=' + filter_glob)
if filter:
args.append('kunit.filter="' + filter + '"')
if filter_action:
args.append('kunit.filter_action=' + filter_action)
args.append('kunit.enable=1')
process = self._ops.start(args, build_dir)
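
For reference, a sketch with assumed values (attr_filter stands in for the
filter parameter above to avoid shadowing the Python builtin) of the kernel
command line that these appends produce:

  # Sketch with assumed values, mirroring the appends in run_kernel() above.
  args = []
  filter_glob, attr_filter, filter_action = 'example.*', 'speed>slow', 'skip'
  if filter_glob:
      args.append('kunit.filter_glob=' + filter_glob)
  if attr_filter:
      args.append('kunit.filter="' + attr_filter + '"')
  if filter_action:
      args.append('kunit.filter_action=' + filter_action)
  args.append('kunit.enable=1')
  print(' '.join(args))
  # -> kunit.filter_glob=example.* kunit.filter="speed>slow" kunit.filter_action=skip kunit.enable=1
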
tools/testing/kunit/kunit_parser.py
@@ -212,6 +212,7 @@ KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
KTAP_END = re.compile(r'\s*(List of all partitions:|'
'Kernel panic - not syncing: VFS:|reboot: System halted)')
EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
"""Extracts KTAP lines from the kernel output."""
@@ -242,6 +243,8 @@ def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
# remove the prefix, if any.
line = line[prefix_len:]
yield line_num, line
elif EXECUTOR_ERROR.search(line):
yield line_num, line
return LineStream(lines=isolate_ktap_output(kernel_output))
KTAP_VERSIONS = [1]
@@ -447,7 +450,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
Log of diagnostic lines
"""
log = [] # type: List[str]
non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START]
non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START]
while lines and not any(re.match(lines.peek())
for re in non_diagnostic_lines):
log.append(lines.pop())
@@ -713,6 +716,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
"""
test = Test()
test.log.extend(log)
# Parse any errors prior to parsing tests
err_log = parse_diagnostic(lines)
test.log.extend(err_log)
if not is_subtest:
# If parsing the main/top-level test, parse KTAP version line and
# test plan
@@ -774,6 +782,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
# Don't override a bad status if this test had one reported.
# Assumption: no subtests means CRASHED is from Test.__init__()
if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
print_log(test.log)
test.status = TestStatus.NO_TESTS
test.add_error('0 tests run!')
tools/testing/kunit/kunit_tool_test.py
@@ -597,7 +597,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
@@ -605,7 +605,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_exec_passes_args_fail(self):
@@ -629,7 +629,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run'])
self.assertEqual(e.exception.code, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
def test_exec_raw_output(self):
@@ -670,13 +670,13 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output', 'filter_glob'])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
args=None, build_dir='.kunit', filter_glob='filter_glob', filter='', filter_action=None, timeout=300)
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
@@ -684,7 +684,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run', '--timeout', str(timeout)])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_builddir(self):
@@ -692,7 +692,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run', '--build_dir=.kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_config_builddir(self):
@@ -710,7 +710,7 @@ class KUnitMainTest(unittest.TestCase):
build_dir = '.kunit'
kunit.main(['exec', '--build_dir', build_dir])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_kunitconfig(self):
@@ -786,7 +786,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300)
args=['a=1','b=2'], build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_list_tests(self):
@@ -794,13 +794,11 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
got = kunit._list_tests(self.linux_source_mock,
kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'suite'))
kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
self.assertEqual(got, want)
# Should respect the user's filter glob when listing tests.
self.linux_source_mock.run_kernel.assert_called_once_with(
args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', timeout=300)
args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', filter='', filter_action=None, timeout=300)
@mock.patch.object(kunit, '_list_tests')
def test_run_isolated_by_suite(self, mock_tests):
@@ -809,10 +807,10 @@ class KUnitMainTest(unittest.TestCase):
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', None, 'suite'))
kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
self.linux_source_mock.run_kernel.assert_has_calls([
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300),
])
@mock.patch.object(kunit, '_list_tests')
@@ -822,13 +820,12 @@ class KUnitMainTest(unittest.TestCase):
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'test'))
kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
self.linux_source_mock.run_kernel.assert_has_calls([
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', filter='', filter_action=None, timeout=300),
])
if __name__ == '__main__':
unittest.main()