2020-11-23 22:15:33 +08:00
|
|
|
#!/usr/bin/env python3
|
2019-09-23 17:02:43 +08:00
|
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
|
|
#
|
|
|
|
# A thin wrapper on top of the KUnit Kernel
|
|
|
|
#
|
|
|
|
# Copyright (C) 2019, Google LLC.
|
|
|
|
# Author: Felix Guo <felixguoxiuping@gmail.com>
|
|
|
|
# Author: Brendan Higgins <brendanhiggins@google.com>
|
|
|
|
|
|
|
|
import argparse
|
|
|
|
import os
|
kunit: tool: support running each suite/test separately
The new --run_isolated flag makes the tool boot the kernel once per
suite or test, preventing leftover state from one suite to impact the
other. This can be useful as a starting point to debugging test
hermeticity issues.
Note: it takes a lot longer, so people should not use it normally.
Consider the following very simplified example:
bool disable_something_for_test = false;
void function_being_tested() {
...
if (disable_something_for_test) return;
...
}
static void test_before(struct kunit *test)
{
disable_something_for_test = true;
function_being_tested();
/* oops, we forgot to reset it back to false */
}
static void test_after(struct kunit *test)
{
/* oops, now "fixing" test_before can cause test_after to fail! */
function_being_tested();
}
Presented like this, the issues are obvious, but it gets a lot more
complicated to track down as the amount of test setup and helper
functions increases.
Another use case is memory corruption. It might not be surfaced as a
failure/crash in the test case or suite that caused it. I've noticed in
kunit's own unit tests, the 3rd suite after might be the one to finally
crash after an out-of-bounds write, for example.
Example usage:
Per suite:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
...
Starting KUnit Kernel (1/7)...
============================================================
======== [PASSED] kunit_executor_test ========
....
Testing complete. 5 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/7)...
============================================================
======== [PASSED] kunit-try-catch-test ========
...
Per test:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
Starting KUnit Kernel (1/23)...
============================================================
======== [PASSED] kunit_executor_test ========
[PASSED] parse_filter_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/23)...
============================================================
======== [PASSED] kunit_executor_test ========
[PASSED] filter_subsuite_test
...
It works with filters as well:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite example
...
Starting KUnit Kernel (1/1)...
============================================================
======== [PASSED] example ========
...
It also handles test filters, '*.*skip*' runs these 3 tests:
kunit_status.kunit_status_mark_skipped_test
example.example_skip_test
example.example_mark_skipped_test
Fixed up merge conflict between:
d8c23ead708b ("kunit: tool: better handling of quasi-bool args (--json, --raw_output)") and
6710951ee039 ("kunit: tool: support running each suite/test separately")
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
2021-10-01 06:20:48 +08:00
|
|
|
import re
|
|
|
|
import sys
|
2019-09-23 17:02:43 +08:00
|
|
|
import time
|
|
|
|
|
2021-07-13 03:52:58 +08:00
|
|
|
assert sys.version_info >= (3, 7), "Python version is too old"
|
|
|
|
|
2019-09-23 17:02:43 +08:00
|
|
|
from collections import namedtuple
|
|
|
|
from enum import Enum, auto
|
kunit: tool: support running each suite/test separately
The new --run_isolated flag makes the tool boot the kernel once per
suite or test, preventing leftover state from one suite to impact the
other. This can be useful as a starting point to debugging test
hermeticity issues.
Note: it takes a lot longer, so people should not use it normally.
Consider the following very simplified example:
bool disable_something_for_test = false;
void function_being_tested() {
...
if (disable_something_for_test) return;
...
}
static void test_before(struct kunit *test)
{
disable_something_for_test = true;
function_being_tested();
/* oops, we forgot to reset it back to false */
}
static void test_after(struct kunit *test)
{
/* oops, now "fixing" test_before can cause test_after to fail! */
function_being_tested();
}
Presented like this, the issues are obvious, but it gets a lot more
complicated to track down as the amount of test setup and helper
functions increases.
Another use case is memory corruption. It might not be surfaced as a
failure/crash in the test case or suite that caused it. I've noticed in
kunit's own unit tests, the 3rd suite after might be the one to finally
crash after an out-of-bounds write, for example.
Example usage:
Per suite:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
...
Starting KUnit Kernel (1/7)...
============================================================
======== [PASSED] kunit_executor_test ========
....
Testing complete. 5 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/7)...
============================================================
======== [PASSED] kunit-try-catch-test ========
...
Per test:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
Starting KUnit Kernel (1/23)...
============================================================
======== [PASSED] kunit_executor_test ========
[PASSED] parse_filter_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/23)...
============================================================
======== [PASSED] kunit_executor_test ========
[PASSED] filter_subsuite_test
...
It works with filters as well:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite example
...
Starting KUnit Kernel (1/1)...
============================================================
======== [PASSED] example ========
...
It also handles test filters, '*.*skip*' runs these 3 tests:
kunit_status.kunit_status_mark_skipped_test
example.example_skip_test
example.example_mark_skipped_test
Fixed up merge conflict between:
d8c23ead708b ("kunit: tool: better handling of quasi-bool args (--json, --raw_output)") and
6710951ee039 ("kunit: tool: support running each suite/test separately")
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
2021-10-01 06:20:48 +08:00
|
|
|
from typing import Iterable, Sequence, List
|
2019-09-23 17:02:43 +08:00
|
|
|
|
2020-08-12 05:27:56 +08:00
|
|
|
import kunit_json
|
2019-09-23 17:02:43 +08:00
|
|
|
import kunit_kernel
|
|
|
|
import kunit_parser
|
|
|
|
|
2020-05-01 12:27:01 +08:00
|
|
|
# Outcome of one kunit.py stage: overall status, the stage's result payload
# (message string or parsed test tree), and how long the stage took.
KunitResult = namedtuple('KunitResult', ['status', 'result', 'elapsed_time'])

# Arguments needed to (re)configure the KUnit kernel.
KunitConfigRequest = namedtuple('KunitConfigRequest',
				['build_dir', 'make_options'])
# Arguments needed to build the KUnit kernel.
KunitBuildRequest = namedtuple('KunitBuildRequest',
			       ['jobs', 'build_dir', 'alltests',
				'make_options'])
# Arguments needed to boot the built kernel and collect test output.
KunitExecRequest = namedtuple('KunitExecRequest',
			      ['timeout', 'build_dir', 'alltests',
			       'filter_glob', 'kernel_args', 'run_isolated'])
# Arguments needed to parse raw kernel output into structured results.
KunitParseRequest = namedtuple('KunitParseRequest',
			       ['raw_output', 'build_dir', 'json'])
# Everything `kunit.py run` needs end-to-end (config + build + exec + parse).
KunitRequest = namedtuple('KunitRequest',
			  ['raw_output', 'timeout', 'jobs',
			   'build_dir', 'alltests', 'filter_glob',
			   'kernel_args', 'run_isolated', 'json', 'make_options'])

# Prefix of argv[0] up to the kunit tool directory, i.e. the kernel tree root
# when the script is invoked from inside a kernel checkout.
KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
|
|
|
|
|
2019-09-23 17:02:43 +08:00
|
|
|
class KunitStatus(Enum):
	"""Overall result of a kunit.py invocation, keyed by the failing stage."""
	SUCCESS = auto()
	CONFIG_FAILURE = auto()
	BUILD_FAILURE = auto()
	TEST_FAILURE = auto()
|
|
|
|
|
2021-01-15 08:39:11 +08:00
|
|
|
def get_kernel_root_path() -> str:
	"""Return the kernel source tree root containing this script.

	Exits the process with status 1 when the script does not live under
	exactly one `tools/testing/kunit` path component.
	"""
	# Prefer __file__; fall back to argv[0] only when __file__ is empty.
	script = __file__ if __file__ else sys.argv[0]
	pieces = os.path.realpath(script).split('tools/testing/kunit')
	if len(pieces) == 2:
		return pieces[0]
	sys.exit(1)
|
|
|
|
|
2020-05-01 12:27:01 +08:00
|
|
|
def config_tests(linux: kunit_kernel.LinuxSourceTree,
		 request: KunitConfigRequest) -> KunitResult:
	"""Configure (or reconfigure) the KUnit kernel and report the outcome."""
	kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...')

	config_start = time.time()
	success = linux.build_reconfig(request.build_dir, request.make_options)
	elapsed = time.time() - config_start
	if success:
		return KunitResult(KunitStatus.SUCCESS,
				   'configured kernel successfully',
				   elapsed)
	return KunitResult(KunitStatus.CONFIG_FAILURE,
			   'could not configure kernel',
			   elapsed)
|
2019-09-23 17:02:43 +08:00
|
|
|
|
2020-05-01 12:27:01 +08:00
|
|
|
def build_tests(linux: kunit_kernel.LinuxSourceTree,
		request: KunitBuildRequest) -> KunitResult:
	"""Build the KUnit kernel and report status plus elapsed build time."""
	kunit_parser.print_with_timestamp('Building KUnit Kernel ...')

	build_start = time.time()
	success = linux.build_kernel(request.alltests,
				     request.jobs,
				     request.build_dir,
				     request.make_options)
	build_end = time.time()
	# A single failure check suffices; an earlier merge had left a
	# second, identical `if not success` block here (dead code).
	if not success:
		return KunitResult(KunitStatus.BUILD_FAILURE,
				   'could not build kernel',
				   build_end - build_start)
	return KunitResult(KunitStatus.SUCCESS,
			   'built kernel successfully',
			   build_end - build_start)
|
2019-09-23 17:02:43 +08:00
|
|
|
|
kunit: tool: support running each suite/test separately
The new --run_isolated flag makes the tool boot the kernel once per
suite or test, preventing leftover state from one suite to impact the
other. This can be useful as a starting point to debugging test
hermeticity issues.
Note: it takes a lot longer, so people should not use it normally.
Consider the following very simplified example:
bool disable_something_for_test = false;
void function_being_tested() {
...
if (disable_something_for_test) return;
...
}
static void test_before(struct kunit *test)
{
disable_something_for_test = true;
function_being_tested();
/* oops, we forgot to reset it back to false */
}
static void test_after(struct kunit *test)
{
/* oops, now "fixing" test_before can cause test_after to fail! */
function_being_tested();
}
Presented like this, the issues are obvious, but it gets a lot more
complicated to track down as the amount of test setup and helper
functions increases.
Another use case is memory corruption. It might not be surfaced as a
failure/crash in the test case or suite that caused it. I've noticed in
kunit's own unit tests, the 3rd suite after might be the one to finally
crash after an out-of-bounds write, for example.
Example usage:
Per suite:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
...
Starting KUnit Kernel (1/7)...
============================================================
======== [PASSED] kunit_executor_test ========
....
Testing complete. 5 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/7)...
============================================================
======== [PASSED] kunit-try-catch-test ========
...
Per test:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
Starting KUnit Kernel (1/23)...
============================================================
======== [PASSED] kunit_executor_test ========
[PASSED] parse_filter_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/23)...
============================================================
======== [PASSED] kunit_executor_test ========
[PASSED] filter_subsuite_test
...
It works with filters as well:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite example
...
Starting KUnit Kernel (1/1)...
============================================================
======== [PASSED] example ========
...
It also handles test filters, '*.*skip*' runs these 3 tests:
kunit_status.kunit_status_mark_skipped_test
example.example_skip_test
example.example_mark_skipped_test
Fixed up merge conflict between:
d8c23ead708b ("kunit: tool: better handling of quasi-bool args (--json, --raw_output)") and
6710951ee039 ("kunit: tool: support running each suite/test separately")
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
2021-10-01 06:20:48 +08:00
|
|
|
def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
|
|
|
|
args = ['kunit.action=list']
|
|
|
|
if request.kernel_args:
|
|
|
|
args.extend(request.kernel_args)
|
|
|
|
|
|
|
|
output = linux.run_kernel(args=args,
|
|
|
|
timeout=None if request.alltests else request.timeout,
|
|
|
|
filter_glob=request.filter_glob,
|
|
|
|
build_dir=request.build_dir)
|
|
|
|
lines = kunit_parser.extract_tap_lines(output)
|
|
|
|
# Hack! Drop the dummy TAP version header that the executor prints out.
|
|
|
|
lines.pop()
|
|
|
|
|
|
|
|
# Filter out any extraneous non-test output that might have gotten mixed in.
|
|
|
|
return [l for l in lines if re.match('^[^\s.]+\.[^\s.]+$', l)]
|
|
|
|
|
|
|
|
def _suites_from_test_list(tests: List[str]) -> List[str]:
|
|
|
|
"""Extracts all the suites from an ordered list of tests."""
|
|
|
|
suites = [] # type: List[str]
|
|
|
|
for t in tests:
|
|
|
|
parts = t.split('.', maxsplit=2)
|
|
|
|
if len(parts) != 2:
|
|
|
|
raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"')
|
|
|
|
suite, case = parts
|
|
|
|
if not suites or suites[-1] != suite:
|
|
|
|
suites.append(suite)
|
|
|
|
return suites
|
|
|
|
|
|
|
|
|
|
|
|
|
2021-10-01 06:20:46 +08:00
|
|
|
def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
	       parse_request: KunitParseRequest) -> KunitResult:
	"""Run the built kernel and parse its test output into a KunitResult.

	When request.run_isolated is 'suite' or 'test', the kernel is booted
	once per suite/test instead of once overall, so state cannot leak
	between them.
	"""
	globs = [request.filter_glob]
	if request.run_isolated:
		listed = _list_tests(linux, request)
		if request.run_isolated == 'test':
			globs = listed
		if request.run_isolated == 'suite':
			globs = _suites_from_test_list(listed)
			# Apply the test-part of the user's glob, if present.
			if '.' in request.filter_glob:
				test_part = request.filter_glob.split('.', maxsplit=2)[1]
				globs = [g + '.' + test_part for g in globs]

	counts = kunit_parser.TestCounts()
	exec_time = 0.0
	for idx, glob in enumerate(globs, start=1):
		kunit_parser.print_with_timestamp(
			'Starting KUnit Kernel ({}/{})...'.format(idx, len(globs)))

		started = time.time()
		run_result = linux.run_kernel(
			args=request.kernel_args,
			timeout=None if request.alltests else request.timeout,
			filter_glob=glob,
			build_dir=request.build_dir)

		result = parse_tests(parse_request, run_result)
		# run_kernel() doesn't block on the kernel exiting; that only
		# happens after the last line of `run_result` is consumed.  So
		# exec_time actually covers parsing + execution, which is fine.
		exec_time += time.time() - started

		counts.add_subtest_counts(result.result.test.counts)

	overall = _map_to_overall_status(counts.get_status())
	return KunitResult(status=overall, result=result.result, elapsed_time=exec_time)
|
|
|
|
|
|
|
|
def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
	"""Collapse a parser-level TestStatus into the tool's pass/fail KunitStatus.

	SKIPPED counts as success; every other non-success status is a failure.
	"""
	passing = (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED)
	return KunitStatus.SUCCESS if test_status in passing else KunitStatus.TEST_FAILURE
|
2020-05-01 12:27:01 +08:00
|
|
|
|
2021-10-01 06:20:46 +08:00
|
|
|
def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult:
	"""Parse (or, with --raw_output, simply echo) kernel test output.

	Args:
		request: options controlling raw output, build_dir and JSON dumping.
		input_data: lines of kernel output.

	Returns:
		A KunitResult; its elapsed_time covers only the parse phase.
	"""
	parse_start = time.time()

	# Placeholder result; only used on the raw-output path below, where the
	# output is echoed rather than parsed.
	test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
					      kunit_parser.Test(),
					      'Tests not Parsed.')

	if request.raw_output:
		# Treat unparsed results as one passing test.
		test_result.test.status = kunit_parser.TestStatus.SUCCESS
		test_result.test.counts.passed = 1

		output: Iterable[str] = input_data
		if request.raw_output == 'all':
			pass
		elif request.raw_output == 'kunit':
			output = kunit_parser.extract_tap_lines(output)
		else:
			# Unknown mode: warn on stderr but still fall through and
			# echo all lines, as if 'all' had been given.
			print(f'Unknown --raw_output option "{request.raw_output}"', file=sys.stderr)
		for line in output:
			print(line.rstrip())

	else:
		test_result = kunit_parser.parse_run_tests(input_data)
	parse_end = time.time()

	if request.json:
		# 'stdout' is the massaged default for a bare --json flag.
		# NOTE(review): other values look like they are treated as an
		# output file path by get_json_result (json_path=) — confirm.
		json_obj = kunit_json.get_json_result(
					test_result=test_result,
					def_config='kunit_defconfig',
					build_dir=request.build_dir,
					json_path=request.json)
		if request.json == 'stdout':
			print(json_obj)

	if test_result.status != kunit_parser.TestStatus.SUCCESS:
		return KunitResult(KunitStatus.TEST_FAILURE, test_result,
				   parse_end - parse_start)

	return KunitResult(KunitStatus.SUCCESS, test_result,
			   parse_end - parse_start)
|
|
|
|
|
|
|
|
def run_tests(linux: kunit_kernel.LinuxSourceTree,
	      request: KunitRequest) -> KunitResult:
	"""Config, build, then exec+parse the KUnit kernel, printing timings.

	Returns the result of the first failing phase (config or build), or
	the exec/parse result when the pipeline runs to completion.
	"""
	started = time.time()

	config_result = config_tests(linux, KunitConfigRequest(request.build_dir,
							       request.make_options))
	if config_result.status != KunitStatus.SUCCESS:
		return config_result

	build_result = build_tests(linux, KunitBuildRequest(request.jobs,
							    request.build_dir,
							    request.alltests,
							    request.make_options))
	if build_result.status != KunitStatus.SUCCESS:
		return build_result

	exec_result = exec_tests(linux,
				 KunitExecRequest(request.timeout, request.build_dir,
						  request.alltests, request.filter_glob,
						  request.kernel_args, request.run_isolated),
				 KunitParseRequest(request.raw_output,
						   request.build_dir,
						   request.json))

	finished = time.time()
	kunit_parser.print_with_timestamp(
		'Elapsed time: %.3fs total, %.3fs configuring, %.3fs building, %.3fs running\n' % (
			finished - started,
			config_result.elapsed_time,
			build_result.elapsed_time,
			exec_result.elapsed_time))
	return exec_result
|
2020-05-01 12:27:01 +08:00
|
|
|
|
2021-09-23 00:39:21 +08:00
|
|
|
# Problem:
# $ kunit.py run --json
# works as one would expect and prints the parsed test results as JSON.
# $ kunit.py run --json suite_name
# would *not* pass suite_name as the filter_glob and print as json.
# argparse will consider it to be another way of writing
# $ kunit.py run --json=suite_name
# i.e. it would run all tests, and dump the json to a `suite_name` file.
# So we hackily automatically rewrite --json => --json=stdout
pseudo_bool_flag_defaults = {
		'--json': 'stdout',
		'--raw_output': 'kunit',
}
def massage_argv(argv: Sequence[str]) -> Sequence[str]:
	"""Rewrite bare pseudo-boolean flags (e.g. `--json`) into their explicit
	`--flag=value` form so argparse cannot swallow the next positional arg."""
	return [arg if arg not in pseudo_bool_flag_defaults
		else f'{arg}={pseudo_bool_flag_defaults[arg]}'
		for arg in argv]
|
|
|
|
|
2021-01-15 08:39:11 +08:00
|
|
|
def add_common_opts(parser) -> None:
	"""Register the options shared by the config/build/run/exec subcommands."""
	parser.add_argument('--build_dir',
			    help='As in the make command, it specifies the build '
			    'directory.',
			    type=str, default='.kunit', metavar='build_dir')
	parser.add_argument('--make_options',
			    help='X=Y make option, can be repeated.',
			    action='append')
	parser.add_argument('--alltests',
			    help='Run all KUnit tests through allyesconfig',
			    action='store_true')
	parser.add_argument('--kunitconfig',
			    help='Path to Kconfig fragment that enables KUnit tests.'
			    ' If given a directory, (e.g. lib/kunit), "/.kunitconfig" '
			    'will get automatically appended.',
			    metavar='kunitconfig')

	parser.add_argument('--arch',
			    help=('Specifies the architecture to run tests under. '
				  'The architecture specified here must match the '
				  'string passed to the ARCH make param, '
				  'e.g. i386, x86_64, arm, um, etc. Non-UML '
				  'architectures run on QEMU.'),
			    type=str, default='um', metavar='arch')

	parser.add_argument('--cross_compile',
			    help=('Sets make\'s CROSS_COMPILE variable; it should '
				  'be set to a toolchain path prefix (the prefix '
				  'of gcc and other tools in your toolchain, for '
				  'example `sparc64-linux-gnu-` if you have the '
				  'sparc toolchain installed on your system, or '
				  '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` '
				  'if you have downloaded the microblaze toolchain '
				  'from the 0-day website to a directory in your '
				  'home directory called `toolchains`).'),
			    metavar='cross_compile')

	# Fixed help text: previously read "a path to a path to a file".
	parser.add_argument('--qemu_config',
			    help=('Takes a path to a file containing '
				  'a QemuArchParams object.'),
			    type=str, metavar='qemu_config')
|
|
|
|
|
2021-01-15 08:39:11 +08:00
|
|
|
def add_build_opts(parser) -> None:
	"""Register options used only by the build phase ('build' and 'run')."""
	parser.add_argument(
		'--jobs',
		help='As in the make command, "Specifies the number of '
		'jobs (commands) to run simultaneously."',
		type=int, default=8, metavar='jobs')
|
|
|
|
|
2021-01-15 08:39:11 +08:00
|
|
|
def add_exec_opts(parser) -> None:
	"""Register options used by the execution phase ('exec' and 'run')."""
	parser.add_argument('--timeout',
			    help='maximum number of seconds to allow for all tests '
			    'to run. This does not include time taken to build the '
			    'tests.',
			    type=int,
			    default=300,
			    metavar='timeout')
	parser.add_argument('filter_glob',
			    help='Filter which KUnit test suites/tests run at '
			    'boot-time, e.g. list* or list*.*del_test',
			    type=str,
			    nargs='?',
			    default='',
			    metavar='filter_glob')
	# Help typo fixed: "Maybe be repeated" -> "May be repeated".
	parser.add_argument('--kernel_args',
			    help='Kernel command-line parameters. May be repeated.',
			    action='append')
	# Fixes: help typo ("This is can be useful") and a stray trailing comma
	# that turned the call's value into a throwaway 1-tuple.
	parser.add_argument('--run_isolated', help='If set, boot the kernel for each '
			    'individual suite/test. This can be useful for debugging '
			    'a non-hermetic test, one that might pass/fail based on '
			    'what ran before it.',
			    type=str,
			    choices=['suite', 'test'])
|
2020-05-01 12:27:01 +08:00
|
|
|
|
2021-01-15 08:39:11 +08:00
|
|
|
def add_parse_opts(parser) -> None:
	"""Register output-formatting options ('run', 'exec' and 'parse')."""
	# nargs='?' + const makes a bare --raw_output mean 'all'.
	parser.add_argument(
		'--raw_output',
		help='If set don\'t format output from kernel. '
		'If set to --raw_output=kunit, filters to just KUnit output.',
		type=str, nargs='?', const='all', default=None)
	# A bare --json dumps to stdout; an argument is treated as a filename.
	parser.add_argument(
		'--json',
		nargs='?',
		help='Stores test results in a JSON, and either '
		'prints to stdout or saves to file if a '
		'filename is specified',
		type=str, const='stdout', default=None)
|
2019-09-23 17:02:43 +08:00
|
|
|
|
2019-09-23 17:02:44 +08:00
|
|
|
def _get_source_tree(cli_args) -> kunit_kernel.LinuxSourceTree:
	"""Build a LinuxSourceTree from the CLI options shared by run/config/build/exec.

	Previously this construction was duplicated verbatim in all four branches.
	"""
	return kunit_kernel.LinuxSourceTree(cli_args.build_dir,
					    kunitconfig_path=cli_args.kunitconfig,
					    arch=cli_args.arch,
					    cross_compile=cli_args.cross_compile,
					    qemu_config_path=cli_args.qemu_config)

def main(argv, linux=None):
	"""Parse argv and dispatch to the requested kunit.py subcommand.

	Args:
		argv: command-line arguments, without the program name.
		linux: optional pre-built LinuxSourceTree (injected by tests);
			when falsy, one is constructed from the CLI options.
	"""
	parser = argparse.ArgumentParser(
			description='Helps writing and running KUnit tests.')
	subparser = parser.add_subparsers(dest='subcommand')

	# The 'run' command will config, build, exec, and parse in one go.
	run_parser = subparser.add_parser('run', help='Runs KUnit tests.')
	add_common_opts(run_parser)
	add_build_opts(run_parser)
	add_exec_opts(run_parser)
	add_parse_opts(run_parser)

	config_parser = subparser.add_parser('config',
						help='Ensures that .config contains all of '
						'the options in .kunitconfig')
	add_common_opts(config_parser)

	build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests')
	add_common_opts(build_parser)
	add_build_opts(build_parser)

	exec_parser = subparser.add_parser('exec', help='Run a kernel with KUnit tests')
	add_common_opts(exec_parser)
	add_exec_opts(exec_parser)
	add_parse_opts(exec_parser)

	# The 'parse' option is special, as it doesn't need the kernel source
	# (therefore there is no need for a build_dir, hence no add_common_opts)
	# and the '--file' argument is not relevant to 'run', so isn't in
	# add_parse_opts()
	parse_parser = subparser.add_parser('parse',
					    help='Parses KUnit results from a file, '
					    'and parses formatted results.')
	add_parse_opts(parse_parser)
	parse_parser.add_argument('file',
				  help='Specifies the file to read results from.',
				  type=str, nargs='?', metavar='input_file')

	# massage_argv() rewrites bare --json/--raw_output into --json=stdout etc.
	cli_args = parser.parse_args(massage_argv(argv))

	if get_kernel_root_path():
		os.chdir(get_kernel_root_path())

	if cli_args.subcommand == 'run':
		if not os.path.exists(cli_args.build_dir):
			os.mkdir(cli_args.build_dir)

		if not linux:
			linux = _get_source_tree(cli_args)

		request = KunitRequest(cli_args.raw_output,
				       cli_args.timeout,
				       cli_args.jobs,
				       cli_args.build_dir,
				       cli_args.alltests,
				       cli_args.filter_glob,
				       cli_args.kernel_args,
				       cli_args.run_isolated,
				       cli_args.json,
				       cli_args.make_options)
		result = run_tests(linux, request)
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'config':
		if cli_args.build_dir and (
				not os.path.exists(cli_args.build_dir)):
			os.mkdir(cli_args.build_dir)

		if not linux:
			linux = _get_source_tree(cli_args)

		request = KunitConfigRequest(cli_args.build_dir,
					     cli_args.make_options)
		result = config_tests(linux, request)
		kunit_parser.print_with_timestamp((
			'Elapsed time: %.3fs\n') % (
				result.elapsed_time))
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'build':
		if not linux:
			linux = _get_source_tree(cli_args)

		request = KunitBuildRequest(cli_args.jobs,
					    cli_args.build_dir,
					    cli_args.alltests,
					    cli_args.make_options)
		result = build_tests(linux, request)
		kunit_parser.print_with_timestamp((
			'Elapsed time: %.3fs\n') % (
				result.elapsed_time))
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'exec':
		if not linux:
			linux = _get_source_tree(cli_args)

		exec_request = KunitExecRequest(cli_args.timeout,
						cli_args.build_dir,
						cli_args.alltests,
						cli_args.filter_glob,
						cli_args.kernel_args,
						cli_args.run_isolated)
		parse_request = KunitParseRequest(cli_args.raw_output,
						  cli_args.build_dir,
						  cli_args.json)
		result = exec_tests(linux, exec_request, parse_request)
		kunit_parser.print_with_timestamp((
			'Elapsed time: %.3fs\n') % (result.elapsed_time))
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'parse':
		# was `== None`; identity comparison is the PEP 8 idiom
		if cli_args.file is None:
			# Avoid UnicodeDecodeError crashes on binary junk in stdin.
			sys.stdin.reconfigure(errors='backslashreplace')  # pytype: disable=attribute-error
			kunit_output = sys.stdin
		else:
			with open(cli_args.file, 'r', errors='backslashreplace') as f:
				kunit_output = f.read().splitlines()
		# No kernel source involved, so there is no build_dir.
		request = KunitParseRequest(cli_args.raw_output,
					    None,
					    cli_args.json)
		result = parse_tests(request, kunit_output)
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	else:
		parser.print_help()
|
|
|
|
|
|
|
|
if __name__ == '__main__':
	# Drop argv[0] (the script name); main() parses only the real arguments.
	main(sys.argv[1:])
|