#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
# Abort on any unhandled command failure; the main test loop temporarily
# disables this (set +e) around each test so individual results can be
# collected and reported.
set -e
# Check whether $1 is one of the make targets this script knows how to run.
# Returns 0 for a supported target, 1 otherwise. Aborts if $1 is missing/empty.
is_valid_target() {
    local candidate="${1:?}"

    case "$candidate" in
        all|setup|run|clean|clean-again)
            return 0
            ;;
        *)
            return 1
            ;;
    esac
}
# Check whether the test directory $1 carries any deny-list marker file.
# Marker names come from $DENY_LIST_MARKERS (and the legacy
# $BLACKLIST_MARKERS). Returns 0 when the test may run; prints a notice and
# returns 1 when it is deny-listed.
pass_deny_list() {
    local testdir="${1:?}"
    local m

    # Intentionally unquoted: both variables hold whitespace-separated lists.
    for m in $DENY_LIST_MARKERS $BLACKLIST_MARKERS; do
        [[ -f "$testdir/$m" ]] || continue
        echo "========== DENY-LISTED: $testdir ($m) =========="
        return 1
    done

    return 0
}
test_run() {
local test_name="${1:?}"
shift
if [[ $# -eq 0 ]]; then
echo >&2 "test_run: missing arguments"
exit 1
fi
# Note: let's be very explicit in reporting the return code of the test command here, i.e don't rely on
# `set -e` or the return code of the last statement in the function, since reporting false positive
# would be very bad in this case.
if [[ "${SPLIT_TEST_LOGS:-0}" -ne 0 && -n "${ARTIFACT_DIRECTORY:-}" ]]; then
(set -x; "$@") &>>"$ARTIFACT_DIRECTORY/$test_name.log" || return $?
else
(set -x; "$@") || return $?
fi
}
# Default make targets; 'clean' and 'clean-again' are split out of ARGS
# further down and handled as dedicated phases.
ARGS=(setup run clean-again)
CLEAN=0
CLEAN_AGAIN=0
COUNT=0
FAILURES=0
declare -A RESULTS
declare -A TIMES

# Locate and refresh the build tree, unless the caller opted out via NO_BUILD.
if [[ "${NO_BUILD:-0}" =~ ^(1|yes|true)$ ]]; then
    BUILD_DIR=""
elif BUILD_DIR="$("$(dirname "$0")/../tools/find-build-dir.sh")"; then
    ninja -C "$BUILD_DIR"
else
    echo >&2 "No build found, please set BUILD_DIR or NO_BUILD"
    exit 1
fi

# Positional arguments override the default target list.
if [[ $# -gt 0 ]]; then
    ARGS=("$@")
fi
# Bail out early on any unknown make target.
for arg in "${ARGS[@]}"; do
    is_valid_target "$arg" || { echo >&2 "Invalid target: $arg"; exit 1; }
done

# Pull the cleanup operations out of ARGS; 'clean' runs before the tests and
# 'clean-again' runs after each successful test.
args_filtered=()
for arg in "${ARGS[@]}"; do
    case "$arg" in
        clean-again) CLEAN_AGAIN=1 ;;
        clean)       CLEAN=1 ;;
        *)           args_filtered+=("$arg") ;;
    esac
done
ARGS=("${args_filtered[@]}")

# All per-test directories live next to this script.
cd "$(dirname "$0")"
# Glob pattern of test directories to operate on; callers may narrow it via
# the SELECTED_TESTS environment variable.
SELECTED_TESTS="${SELECTED_TESTS:-TEST-??-*}"

# Let's always do the cleaning operation first, because it destroys the image
# cache.
if [[ $CLEAN -eq 1 ]]; then
    # Intentionally unquoted so the pattern expands to the test directories.
    for test in $SELECTED_TESTS; do
        test_run "$test" make -C "$test" clean
    done
fi
# Run actual tests (if requested)
if [[ ${#ARGS[@]} -ne 0 ]]; then
    # Intentionally unquoted so the pattern expands to the test directories.
    for test in $SELECTED_TESTS; do
        COUNT=$((COUNT + 1))
        pass_deny_list "$test" || continue
        # SECONDS is the bash builtin timer; reset it to time this test.
        SECONDS=0

        echo -e "\n[$(date +%R:%S)] --x-- Running $test --x--"
        # Collect the result explicitly instead of letting set -e abort.
        set +e
        test_run "$test" make -C "$test" "${ARGS[@]}"
        result=$?
        set -e
        echo "[$(date +%R:%S)] --x-- Result of $test: $result --x--"

        RESULTS["$test"]="$result"
        TIMES["$test"]="$SECONDS"

        # Run clean-again here to free up space, if requested, and if the test succeeded
        if [[ "$result" -ne 0 ]]; then
            FAILURES=$((FAILURES + 1))
        elif [[ $CLEAN_AGAIN -eq 1 ]]; then
            test_run "$test" make -C "$test" clean-again
        fi
    done
fi
# Print a per-test summary (sorted by test name) followed by the overall
# verdict. The loop runs in a pipeline subshell, so its variables are local
# to the summary.
echo ""
for t in "${!RESULTS[@]}"; do
    if [[ "${RESULTS[$t]}" -eq 0 ]]; then
        verdict="SUCCESS"
    else
        verdict="FAIL"
    fi
    printf "%-35s %-8s (%3s s)\n" "$t:" "$verdict" "${TIMES[$t]}"
done | sort

if [[ "$FAILURES" -eq 0 ]]; then
    echo -e "\nALL $COUNT TESTS PASSED"
else
    echo -e "\nTOTAL FAILURES: $FAILURES OF $COUNT"
fi
# If we have coverage files, merge them into a single report for upload
if [[ -n "$ARTIFACT_DIRECTORY" ]]; then
    merge_args=()
    while read -r tracefile; do
        merge_args+=(--add-tracefile "$tracefile")
    done < <(find "$ARTIFACT_DIRECTORY" -maxdepth 1 -name "*.coverage-info")

    # Two array elements are appended per tracefile, so a length > 1 means at
    # least one coverage file was found.
    if [[ ${#merge_args[@]} -gt 1 ]]; then
        lcov "${merge_args[@]}" --output-file "$ARTIFACT_DIRECTORY/merged.coverage-info"
    fi
fi

# The script's exit status is the number of failed tests.
exit "$FAILURES"