mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00

linux_kselftest-next-6.7-rc1

Merge tag 'linux_kselftest-next-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest

Pull kselftest updates from Shuah Khan:

 - kbuild kselftest-merge target fixes
 - fixes to several tests
 - resctrl test fixes and enhancements
 - ksft_perror() helper and reporting improvements
 - printf attribute to kselftest prints to improve reporting
 - documentation and clang build warning fixes

The bulk of the patches are for resctrl fixes and enhancements.

* tag 'linux_kselftest-next-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (51 commits)
  selftests/resctrl: Fix MBM test failure when MBA unavailable
  selftests/clone3: Report descriptive test names
  selftests:modify the incorrect print format
  selftests/efivarfs: create-read: fix a resource leak
  selftests/ftrace: Add riscv support for kprobe arg tests
  selftests/ftrace: add loongarch support for kprobe args char tests
  selftests/amd-pstate: Added option to provide perf binary path
  selftests/amd-pstate: Fix broken paths to run workloads in amd-pstate-ut
  selftests/resctrl: Move run_benchmark() to a more fitting file
  selftests/resctrl: Fix schemata write error check
  selftests/resctrl: Reduce failures due to outliers in MBA/MBM tests
  selftests/resctrl: Fix feature checks
  selftests/resctrl: Refactor feature check to use resource and feature name
  selftests/resctrl: Move _GNU_SOURCE define into Makefile
  selftests/resctrl: Remove duplicate feature check from CMT test
  selftests/resctrl: Extend signal handler coverage to unmount on receiving signal
  selftests/resctrl: Fix uninitialized .sa_flags
  selftests/resctrl: Cleanup benchmark argument parsing
  selftests/resctrl: Remove ben_count variable
  selftests/resctrl: Make benchmark command const and build it with pointers
  ...

This commit is contained in: commit 7dc0e9c7dd
@@ -112,7 +112,7 @@ You can specify multiple tests to skip::

 You can also specify a restricted list of tests to run together with a
 dedicated skiplist::

-    $ make TARGETS="bpf breakpoints size timers" SKIP_TARGETS=bpf kselftest
+    $ make TARGETS="breakpoints size timers" SKIP_TARGETS=size kselftest

 See the top-level tools/testing/selftests/Makefile for the list of all
 possible targets.

@@ -165,7 +165,7 @@ To see the list of available tests, the `-l` option can be used::

 The `-c` option can be used to run all the tests from a test collection, or
 the `-t` option for specific single tests. Either can be used multiple times::

-    $ ./run_kselftest.sh -c bpf -c seccomp -t timers:posix_timers -t timer:nanosleep
+    $ ./run_kselftest.sh -c size -c seccomp -t timers:posix_timers -t timer:nanosleep

 For other features see the script usage output, seen with the `-h` option.

@@ -210,7 +210,7 @@ option is supported, such as::

 tests by using variables specified in `Running a subset of selftests`_
 section::

-    $ make -C tools/testing/selftests gen_tar TARGETS="bpf" FORMAT=.xz
+    $ make -C tools/testing/selftests gen_tar TARGETS="size" FORMAT=.xz

 .. _tar's auto-compress: https://www.gnu.org/software/tar/manual/html_node/gzip.html#auto_002dcompress
Makefile (4 lines changed)

@@ -1367,8 +1367,8 @@ kselftest-%: headers FORCE
 PHONY += kselftest-merge
 kselftest-merge:
 	$(if $(wildcard $(objtree)/.config),, $(error No .config exists, config your kernel first!))
-	$(Q)find $(srctree)/tools/testing/selftests -name config | \
-		xargs $(srctree)/scripts/kconfig/merge_config.sh -m $(objtree)/.config
+	$(Q)find $(srctree)/tools/testing/selftests -name config -o -name config.$(UTS_MACHINE) | \
+		xargs $(srctree)/scripts/kconfig/merge_config.sh -y -m $(objtree)/.config
 	$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig

 # ---------------------------------------------------------------------------
@@ -30,8 +30,7 @@ import getopt
 import Gnuplot
 from numpy import *
 from decimal import *
-sys.path.append('../intel_pstate_tracer')
-#import intel_pstate_tracer
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "intel_pstate_tracer"))
 import intel_pstate_tracer as ipt

 __license__ = "GPL version 2"
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0

 # Testing and monitor the cpu desire performance, frequency, load,
@@ -66,12 +66,15 @@ post_clear_gitsource()

 install_gitsource()
 {
-	if [ ! -d $git_name ]; then
+	if [ ! -d $SCRIPTDIR/$git_name ]; then
+		pushd $(pwd) > /dev/null 2>&1
+		cd $SCRIPTDIR
 		printf "Download gitsource, please wait a moment ...\n\n"
 		wget -O $git_tar $gitsource_url > /dev/null 2>&1

 		printf "Tar gitsource ...\n\n"
 		tar -xzf $git_tar
+		popd > /dev/null 2>&1
 	fi
 }
@@ -79,12 +82,14 @@ install_gitsource()
 run_gitsource()
 {
 	echo "Launching amd pstate tracer for $1 #$2 tracer_interval: $TRACER_INTERVAL"
-	./amd_pstate_trace.py -n tracer-gitsource-$1-$2 -i $TRACER_INTERVAL > /dev/null 2>&1 &
+	$TRACER -n tracer-gitsource-$1-$2 -i $TRACER_INTERVAL > /dev/null 2>&1 &

 	printf "Make and test gitsource for $1 #$2 make_cpus: $MAKE_CPUS\n"
-	cd $git_name
-	perf stat -a --per-socket -I 1000 -e power/energy-pkg/ /usr/bin/time -o ../$OUTFILE_GIT.time-gitsource-$1-$2.log make test -j$MAKE_CPUS > ../$OUTFILE_GIT-perf-$1-$2.log 2>&1
-	cd ..
+	BACKUP_DIR=$(pwd)
+	pushd $BACKUP_DIR > /dev/null 2>&1
+	cd $SCRIPTDIR/$git_name
+	$PERF stat -a --per-socket -I 1000 -e power/energy-pkg/ /usr/bin/time -o $BACKUP_DIR/$OUTFILE_GIT.time-gitsource-$1-$2.log make test -j$MAKE_CPUS > $BACKUP_DIR/$OUTFILE_GIT-perf-$1-$2.log 2>&1
+	popd > /dev/null 2>&1

 	for job in `jobs -p`
 	do
@@ -8,9 +8,12 @@ else
 	FILE_MAIN=DONE
 fi

-source basic.sh
-source tbench.sh
-source gitsource.sh
+SCRIPTDIR=`dirname "$0"`
+TRACER=$SCRIPTDIR/../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
+
+source $SCRIPTDIR/basic.sh
+source $SCRIPTDIR/tbench.sh
+source $SCRIPTDIR/gitsource.sh

 # amd-pstate-ut only run on x86/x86_64 AMD systems.
 ARCH=$(uname -m 2>/dev/null | sed -e 's/i.86/x86/' -e 's/x86_64/x86/')

@@ -22,6 +25,7 @@ OUTFILE=selftest
 OUTFILE_TBENCH="$OUTFILE.tbench"
 OUTFILE_GIT="$OUTFILE.gitsource"

+PERF=/usr/bin/perf
 SYSFS=
 CPUROOT=
 CPUFREQROOT=

@@ -151,6 +155,7 @@ help()
 	[-p <tbench process number>]
 	[-l <loop times for tbench>]
 	[-i <amd tracer interval>]
+	[-b <perf binary>]
 	[-m <comparative test: acpi-cpufreq>]
 	\n"
 	exit 2

@@ -158,7 +163,7 @@ help()

 parse_arguments()
 {
-	while getopts ho:c:t:p:l:i:m: arg
+	while getopts ho:c:t:p:l:i:b:m: arg
 	do
 		case $arg in
 			h) # --help

@@ -189,6 +194,10 @@ parse_arguments()
 				TRACER_INTERVAL=$OPTARG
 				;;

+			b) # --perf-binary
+				PERF=`realpath $OPTARG`
+				;;
+
 			m) # --comparative-test
 				COMPARATIVE_TEST=$OPTARG
 				;;

@@ -202,8 +211,8 @@ parse_arguments()

 command_perf()
 {
-	if ! command -v perf > /dev/null; then
-		echo $msg please install perf. >&2
+	if ! $PERF -v; then
+		echo $msg please install perf or provide perf binary path as argument >&2
 		exit $ksft_skip
 	fi
 }
@@ -64,11 +64,11 @@ post_clear_tbench()
 run_tbench()
 {
 	echo "Launching amd pstate tracer for $1 #$2 tracer_interval: $TRACER_INTERVAL"
-	./amd_pstate_trace.py -n tracer-tbench-$1-$2 -i $TRACER_INTERVAL > /dev/null 2>&1 &
+	$TRACER -n tracer-tbench-$1-$2 -i $TRACER_INTERVAL > /dev/null 2>&1 &

 	printf "Test tbench for $1 #$2 time_limit: $TIME_LIMIT procs_num: $PROCESS_NUM\n"
 	tbench_srv > /dev/null 2>&1 &
-	perf stat -a --per-socket -I 1000 -e power/energy-pkg/ tbench -t $TIME_LIMIT $PROCESS_NUM > $OUTFILE_TBENCH-perf-$1-$2.log 2>&1
+	$PERF stat -a --per-socket -I 1000 -e power/energy-pkg/ tbench -t $TIME_LIMIT $PROCESS_NUM > $OUTFILE_TBENCH-perf-$1-$2.log 2>&1

 	pid=`pidof tbench_srv`
 	kill $pid
@@ -27,7 +27,7 @@ static const char * const dev_files[] = {
 void print_cachestat(struct cachestat *cs)
 {
 	ksft_print_msg(
-	"Using cachestat: Cached: %lu, Dirty: %lu, Writeback: %lu, Evicted: %lu, Recently Evicted: %lu\n",
+	"Using cachestat: Cached: %llu, Dirty: %llu, Writeback: %llu, Evicted: %llu, Recently Evicted: %llu\n",
 	cs->nr_cache, cs->nr_dirty, cs->nr_writeback,
 	cs->nr_evicted, cs->nr_recently_evicted);
 }
@@ -2,7 +2,7 @@
 TEST_GEN_FILES := validate_cap
 TEST_GEN_PROGS := test_execve

-CFLAGS += -O2 -g -std=gnu99 -Wall
+CFLAGS += -O2 -g -std=gnu99 -Wall $(KHDR_INCLUDES)
 LDLIBS += -lcap-ng -lrt -ldl

 include ../lib.mk
@@ -20,14 +20,6 @@

 #include "../kselftest.h"

-#ifndef PR_CAP_AMBIENT
-#define PR_CAP_AMBIENT			47
-# define PR_CAP_AMBIENT_IS_SET		1
-# define PR_CAP_AMBIENT_RAISE		2
-# define PR_CAP_AMBIENT_LOWER		3
-# define PR_CAP_AMBIENT_CLEAR_ALL	4
-#endif
-
 static int nerrs;
 static pid_t mpid;	/* main() pid is used to avoid duplicate test counts */
@@ -9,14 +9,6 @@

 #include "../kselftest.h"

-#ifndef PR_CAP_AMBIENT
-#define PR_CAP_AMBIENT			47
-# define PR_CAP_AMBIENT_IS_SET		1
-# define PR_CAP_AMBIENT_RAISE		2
-# define PR_CAP_AMBIENT_LOWER		3
-# define PR_CAP_AMBIENT_CLEAR_ALL	4
-#endif
-
 #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 19)
 # define HAVE_GETAUXVAL
 #endif
@ -7,6 +7,7 @@
|
||||
#include <inttypes.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
@ -103,8 +104,8 @@ static int call_clone3(uint64_t flags, size_t size, enum test_mode test_mode)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void test_clone3(uint64_t flags, size_t size, int expected,
|
||||
enum test_mode test_mode)
|
||||
static bool test_clone3(uint64_t flags, size_t size, int expected,
|
||||
enum test_mode test_mode)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -114,92 +115,210 @@ static void test_clone3(uint64_t flags, size_t size, int expected,
|
||||
ret = call_clone3(flags, size, test_mode);
|
||||
ksft_print_msg("[%d] clone3() with flags says: %d expected %d\n",
|
||||
getpid(), ret, expected);
|
||||
if (ret != expected)
|
||||
ksft_test_result_fail(
|
||||
if (ret != expected) {
|
||||
ksft_print_msg(
|
||||
"[%d] Result (%d) is different than expected (%d)\n",
|
||||
getpid(), ret, expected);
|
||||
else
|
||||
ksft_test_result_pass(
|
||||
"[%d] Result (%d) matches expectation (%d)\n",
|
||||
getpid(), ret, expected);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
typedef bool (*filter_function)(void);
|
||||
typedef size_t (*size_function)(void);
|
||||
|
||||
static bool not_root(void)
|
||||
{
|
||||
if (getuid() != 0) {
|
||||
ksft_print_msg("Not running as root\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static size_t page_size_plus_8(void)
|
||||
{
|
||||
return getpagesize() + 8;
|
||||
}
|
||||
|
||||
struct test {
|
||||
const char *name;
|
||||
uint64_t flags;
|
||||
size_t size;
|
||||
size_function size_function;
|
||||
int expected;
|
||||
enum test_mode test_mode;
|
||||
filter_function filter;
|
||||
};
|
||||
|
||||
static const struct test tests[] = {
|
||||
{
|
||||
.name = "simple clone3()",
|
||||
.flags = 0,
|
||||
.size = 0,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "clone3() in a new PID_NS",
|
||||
.flags = CLONE_NEWPID,
|
||||
.size = 0,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
.filter = not_root,
|
||||
},
|
||||
{
|
||||
.name = "CLONE_ARGS_SIZE_VER0",
|
||||
.flags = 0,
|
||||
.size = CLONE_ARGS_SIZE_VER0,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "CLONE_ARGS_SIZE_VER0 - 8",
|
||||
.flags = 0,
|
||||
.size = CLONE_ARGS_SIZE_VER0 - 8,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "sizeof(struct clone_args) + 8",
|
||||
.flags = 0,
|
||||
.size = sizeof(struct __clone_args) + 8,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "exit_signal with highest 32 bits non-zero",
|
||||
.flags = 0,
|
||||
.size = 0,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG,
|
||||
},
|
||||
{
|
||||
.name = "negative 32-bit exit_signal",
|
||||
.flags = 0,
|
||||
.size = 0,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG,
|
||||
},
|
||||
{
|
||||
.name = "exit_signal not fitting into CSIGNAL mask",
|
||||
.flags = 0,
|
||||
.size = 0,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG,
|
||||
},
|
||||
{
|
||||
.name = "NSIG < exit_signal < CSIG",
|
||||
.flags = 0,
|
||||
.size = 0,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG,
|
||||
},
|
||||
{
|
||||
.name = "Arguments sizeof(struct clone_args) + 8",
|
||||
.flags = 0,
|
||||
.size = sizeof(struct __clone_args) + 8,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_ALL_0,
|
||||
},
|
||||
{
|
||||
.name = "Arguments sizeof(struct clone_args) + 16",
|
||||
.flags = 0,
|
||||
.size = sizeof(struct __clone_args) + 16,
|
||||
.expected = -E2BIG,
|
||||
.test_mode = CLONE3_ARGS_ALL_0,
|
||||
},
|
||||
{
|
||||
.name = "Arguments sizeof(struct clone_arg) * 2",
|
||||
.flags = 0,
|
||||
.size = sizeof(struct __clone_args) + 16,
|
||||
.expected = -E2BIG,
|
||||
.test_mode = CLONE3_ARGS_ALL_0,
|
||||
},
|
||||
{
|
||||
.name = "Arguments > page size",
|
||||
.flags = 0,
|
||||
.size_function = page_size_plus_8,
|
||||
.expected = -E2BIG,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "CLONE_ARGS_SIZE_VER0 in a new PID NS",
|
||||
.flags = CLONE_NEWPID,
|
||||
.size = CLONE_ARGS_SIZE_VER0,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
.filter = not_root,
|
||||
},
|
||||
{
|
||||
.name = "CLONE_ARGS_SIZE_VER0 - 8 in a new PID NS",
|
||||
.flags = CLONE_NEWPID,
|
||||
.size = CLONE_ARGS_SIZE_VER0 - 8,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "sizeof(struct clone_args) + 8 in a new PID NS",
|
||||
.flags = CLONE_NEWPID,
|
||||
.size = sizeof(struct __clone_args) + 8,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
.filter = not_root,
|
||||
},
|
||||
{
|
||||
.name = "Arguments > page size in a new PID NS",
|
||||
.flags = CLONE_NEWPID,
|
||||
.size_function = page_size_plus_8,
|
||||
.expected = -E2BIG,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "New time NS",
|
||||
.flags = CLONE_NEWTIME,
|
||||
.size = 0,
|
||||
.expected = 0,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
{
|
||||
.name = "exit signal (SIGCHLD) in flags",
|
||||
.flags = SIGCHLD,
|
||||
.size = 0,
|
||||
.expected = -EINVAL,
|
||||
.test_mode = CLONE3_ARGS_NO_TEST,
|
||||
},
|
||||
};
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
uid_t uid = getuid();
|
||||
size_t size;
|
||||
int i;
|
||||
|
||||
ksft_print_header();
|
||||
ksft_set_plan(19);
|
||||
ksft_set_plan(ARRAY_SIZE(tests));
|
||||
test_clone3_supported();
|
||||
|
||||
/* Just a simple clone3() should return 0.*/
|
||||
test_clone3(0, 0, 0, CLONE3_ARGS_NO_TEST);
|
||||
for (i = 0; i < ARRAY_SIZE(tests); i++) {
|
||||
if (tests[i].filter && tests[i].filter()) {
|
||||
ksft_test_result_skip("%s\n", tests[i].name);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Do a clone3() in a new PID NS.*/
|
||||
if (uid == 0)
|
||||
test_clone3(CLONE_NEWPID, 0, 0, CLONE3_ARGS_NO_TEST);
|
||||
else
|
||||
ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
|
||||
if (tests[i].size_function)
|
||||
size = tests[i].size_function();
|
||||
else
|
||||
size = tests[i].size;
|
||||
|
||||
/* Do a clone3() with CLONE_ARGS_SIZE_VER0. */
|
||||
test_clone3(0, CLONE_ARGS_SIZE_VER0, 0, CLONE3_ARGS_NO_TEST);
|
||||
ksft_print_msg("Running test '%s'\n", tests[i].name);
|
||||
|
||||
/* Do a clone3() with CLONE_ARGS_SIZE_VER0 - 8 */
|
||||
test_clone3(0, CLONE_ARGS_SIZE_VER0 - 8, -EINVAL, CLONE3_ARGS_NO_TEST);
|
||||
|
||||
/* Do a clone3() with sizeof(struct clone_args) + 8 */
|
||||
test_clone3(0, sizeof(struct __clone_args) + 8, 0, CLONE3_ARGS_NO_TEST);
|
||||
|
||||
/* Do a clone3() with exit_signal having highest 32 bits non-zero */
|
||||
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG);
|
||||
|
||||
/* Do a clone3() with negative 32-bit exit_signal */
|
||||
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG);
|
||||
|
||||
/* Do a clone3() with exit_signal not fitting into CSIGNAL mask */
|
||||
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG);
|
||||
|
||||
/* Do a clone3() with NSIG < exit_signal < CSIG */
|
||||
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG);
|
||||
|
||||
test_clone3(0, sizeof(struct __clone_args) + 8, 0, CLONE3_ARGS_ALL_0);
|
||||
|
||||
test_clone3(0, sizeof(struct __clone_args) + 16, -E2BIG,
|
||||
CLONE3_ARGS_ALL_0);
|
||||
|
||||
test_clone3(0, sizeof(struct __clone_args) * 2, -E2BIG,
|
||||
CLONE3_ARGS_ALL_0);
|
||||
|
||||
/* Do a clone3() with > page size */
|
||||
test_clone3(0, getpagesize() + 8, -E2BIG, CLONE3_ARGS_NO_TEST);
|
||||
|
||||
/* Do a clone3() with CLONE_ARGS_SIZE_VER0 in a new PID NS. */
|
||||
if (uid == 0)
|
||||
test_clone3(CLONE_NEWPID, CLONE_ARGS_SIZE_VER0, 0,
|
||||
CLONE3_ARGS_NO_TEST);
|
||||
else
|
||||
ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
|
||||
|
||||
/* Do a clone3() with CLONE_ARGS_SIZE_VER0 - 8 in a new PID NS */
|
||||
test_clone3(CLONE_NEWPID, CLONE_ARGS_SIZE_VER0 - 8, -EINVAL,
|
||||
CLONE3_ARGS_NO_TEST);
|
||||
|
||||
/* Do a clone3() with sizeof(struct clone_args) + 8 in a new PID NS */
|
||||
if (uid == 0)
|
||||
test_clone3(CLONE_NEWPID, sizeof(struct __clone_args) + 8, 0,
|
||||
CLONE3_ARGS_NO_TEST);
|
||||
else
|
||||
ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
|
||||
|
||||
/* Do a clone3() with > page size in a new PID NS */
|
||||
test_clone3(CLONE_NEWPID, getpagesize() + 8, -E2BIG,
|
||||
CLONE3_ARGS_NO_TEST);
|
||||
|
||||
/* Do a clone3() in a new time namespace */
|
||||
test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
|
||||
|
||||
/* Do a clone3() with exit signal (SIGCHLD) in flags */
|
||||
test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
|
||||
ksft_test_result(test_clone3(tests[i].flags, size,
|
||||
tests[i].expected,
|
||||
tests[i].test_mode),
|
||||
"%s\n", tests[i].name);
|
||||
}
|
||||
|
||||
ksft_finished();
|
||||
}
|
||||
|
@ -27,9 +27,7 @@
|
||||
#include "../kselftest_harness.h"
|
||||
#include "clone3_selftests.h"
|
||||
|
||||
#ifndef MAX_PID_NS_LEVEL
|
||||
#define MAX_PID_NS_LEVEL 32
|
||||
#endif
|
||||
|
||||
static void child_exit(int ret)
|
||||
{
|
||||
|
@ -16,10 +16,6 @@
|
||||
#include "../kselftest.h"
|
||||
#include "clone3_selftests.h"
|
||||
|
||||
#ifndef CLONE_CLEAR_SIGHAND
|
||||
#define CLONE_CLEAR_SIGHAND 0x100000000ULL
|
||||
#endif
|
||||
|
||||
static void nop_handler(int signo)
|
||||
{
|
||||
}
|
||||
|
@ -15,10 +15,6 @@
|
||||
|
||||
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
|
||||
|
||||
#ifndef CLONE_INTO_CGROUP
|
||||
#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
|
||||
#endif
|
||||
|
||||
#ifndef __NR_clone3
|
||||
#define __NR_clone3 -1
|
||||
#endif
|
||||
@ -32,18 +28,9 @@ struct __clone_args {
|
||||
__aligned_u64 stack;
|
||||
__aligned_u64 stack_size;
|
||||
__aligned_u64 tls;
|
||||
#ifndef CLONE_ARGS_SIZE_VER0
|
||||
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
|
||||
#endif
|
||||
__aligned_u64 set_tid;
|
||||
__aligned_u64 set_tid_size;
|
||||
#ifndef CLONE_ARGS_SIZE_VER1
|
||||
#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
|
||||
#endif
|
||||
__aligned_u64 cgroup;
|
||||
#ifndef CLONE_ARGS_SIZE_VER2
|
||||
#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
|
||||
#endif
|
||||
};
|
||||
|
||||
static pid_t sys_clone3(struct __clone_args *args, size_t size)
|
||||
|
@ -23,9 +23,7 @@
|
||||
#include "../kselftest.h"
|
||||
#include "clone3_selftests.h"
|
||||
|
||||
#ifndef MAX_PID_NS_LEVEL
|
||||
#define MAX_PID_NS_LEVEL 32
|
||||
#endif
|
||||
|
||||
static int pipe_1[2];
|
||||
static int pipe_2[2];
|
||||
|
@ -16,34 +16,6 @@
|
||||
#include "../kselftest_harness.h"
|
||||
#include "../clone3/clone3_selftests.h"
|
||||
|
||||
#ifndef __NR_close_range
|
||||
#if defined __alpha__
|
||||
#define __NR_close_range 546
|
||||
#elif defined _MIPS_SIM
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
|
||||
#define __NR_close_range (436 + 4000)
|
||||
#endif
|
||||
#if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
|
||||
#define __NR_close_range (436 + 6000)
|
||||
#endif
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
|
||||
#define __NR_close_range (436 + 5000)
|
||||
#endif
|
||||
#elif defined __ia64__
|
||||
#define __NR_close_range (436 + 1024)
|
||||
#else
|
||||
#define __NR_close_range 436
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef CLOSE_RANGE_UNSHARE
|
||||
#define CLOSE_RANGE_UNSHARE (1U << 1)
|
||||
#endif
|
||||
|
||||
#ifndef CLOSE_RANGE_CLOEXEC
|
||||
#define CLOSE_RANGE_CLOEXEC (1U << 2)
|
||||
#endif
|
||||
|
||||
static inline int sys_close_range(unsigned int fd, unsigned int max_fd,
|
||||
unsigned int flags)
|
||||
{
|
||||
|
Mode changes only (Normal file → Executable file, 0 lines changed):
  tools/testing/selftests/damon/debugfs_attrs.sh
  tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh
  tools/testing/selftests/damon/debugfs_empty_targets.sh
  tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
  tools/testing/selftests/damon/debugfs_rm_non_contexts.sh
  tools/testing/selftests/damon/debugfs_schemes.sh
  tools/testing/selftests/damon/debugfs_target_ids.sh
  tools/testing/selftests/damon/lru_sort.sh
  tools/testing/selftests/damon/reclaim.sh
  tools/testing/selftests/damon/sysfs.sh
  tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh

tools/testing/selftests/dmabuf-heaps/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
+dmabuf-heap
@@ -32,8 +32,10 @@ int main(int argc, char **argv)
 	rc = read(fd, buf, sizeof(buf));
 	if (rc != 0) {
 		fprintf(stderr, "Reading a new var should return EOF\n");
+		close(fd);
 		return EXIT_FAILURE;
 	}

+	close(fd);
 	return EXIT_SUCCESS;
 }
@ -23,6 +23,9 @@
|
||||
|
||||
#include "../kselftest.h"
|
||||
|
||||
#define TESTS_EXPECTED 51
|
||||
#define TEST_NAME_LEN (PATH_MAX * 4)
|
||||
|
||||
static char longpath[2 * PATH_MAX] = "";
|
||||
static char *envp[] = { "IN_TEST=yes", NULL, NULL };
|
||||
static char *argv[] = { "execveat", "99", NULL };
|
||||
@ -43,71 +46,85 @@ static int execveat_(int fd, const char *path, char **argv, char **envp,
|
||||
static int _check_execveat_fail(int fd, const char *path, int flags,
|
||||
int expected_errno, const char *errno_str)
|
||||
{
|
||||
char test_name[TEST_NAME_LEN];
|
||||
int rc;
|
||||
|
||||
errno = 0;
|
||||
printf("Check failure of execveat(%d, '%s', %d) with %s... ",
|
||||
fd, path?:"(null)", flags, errno_str);
|
||||
snprintf(test_name, sizeof(test_name),
|
||||
"Check failure of execveat(%d, '%s', %d) with %s",
|
||||
fd, path?:"(null)", flags, errno_str);
|
||||
rc = execveat_(fd, path, argv, envp, flags);
|
||||
|
||||
if (rc > 0) {
|
||||
printf("[FAIL] (unexpected success from execveat(2))\n");
|
||||
ksft_print_msg("unexpected success from execveat(2)\n");
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
return 1;
|
||||
}
|
||||
if (errno != expected_errno) {
|
||||
printf("[FAIL] (expected errno %d (%s) not %d (%s)\n",
|
||||
expected_errno, strerror(expected_errno),
|
||||
errno, strerror(errno));
|
||||
ksft_print_msg("expected errno %d (%s) not %d (%s)\n",
|
||||
expected_errno, strerror(expected_errno),
|
||||
errno, strerror(errno));
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
return 1;
|
||||
}
|
||||
printf("[OK]\n");
|
||||
ksft_test_result_pass("%s\n", test_name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_execveat_invoked_rc(int fd, const char *path, int flags,
|
||||
int expected_rc, int expected_rc2)
|
||||
{
|
||||
char test_name[TEST_NAME_LEN];
|
||||
int status;
|
||||
int rc;
|
||||
pid_t child;
|
||||
int pathlen = path ? strlen(path) : 0;
|
||||
|
||||
if (pathlen > 40)
|
||||
printf("Check success of execveat(%d, '%.20s...%s', %d)... ",
|
||||
fd, path, (path + pathlen - 20), flags);
|
||||
snprintf(test_name, sizeof(test_name),
|
||||
"Check success of execveat(%d, '%.20s...%s', %d)... ",
|
||||
fd, path, (path + pathlen - 20), flags);
|
||||
else
|
||||
printf("Check success of execveat(%d, '%s', %d)... ",
|
||||
fd, path?:"(null)", flags);
|
||||
snprintf(test_name, sizeof(test_name),
|
||||
"Check success of execveat(%d, '%s', %d)... ",
|
||||
fd, path?:"(null)", flags);
|
||||
|
||||
child = fork();
|
||||
if (child < 0) {
|
||||
printf("[FAIL] (fork() failed)\n");
|
||||
ksft_perror("fork() failed");
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
return 1;
|
||||
}
|
||||
if (child == 0) {
|
||||
/* Child: do execveat(). */
|
||||
rc = execveat_(fd, path, argv, envp, flags);
|
||||
printf("[FAIL]: execveat() failed, rc=%d errno=%d (%s)\n",
|
||||
rc, errno, strerror(errno));
|
||||
ksft_print_msg("execveat() failed, rc=%d errno=%d (%s)\n",
|
||||
rc, errno, strerror(errno));
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
exit(1); /* should not reach here */
|
||||
}
|
||||
/* Parent: wait for & check child's exit status. */
|
||||
rc = waitpid(child, &status, 0);
|
||||
if (rc != child) {
|
||||
printf("[FAIL] (waitpid(%d,...) returned %d)\n", child, rc);
|
||||
ksft_print_msg("waitpid(%d,...) returned %d\n", child, rc);
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
return 1;
|
||||
}
|
||||
if (!WIFEXITED(status)) {
|
||||
printf("[FAIL] (child %d did not exit cleanly, status=%08x)\n",
|
||||
child, status);
|
||||
ksft_print_msg("child %d did not exit cleanly, status=%08x\n",
|
||||
child, status);
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
return 1;
|
||||
}
|
||||
if ((WEXITSTATUS(status) != expected_rc) &&
|
||||
(WEXITSTATUS(status) != expected_rc2)) {
|
||||
printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
|
||||
child, WEXITSTATUS(status), expected_rc, expected_rc2);
|
||||
ksft_print_msg("child %d exited with %d not %d nor %d\n",
|
||||
child, WEXITSTATUS(status), expected_rc,
|
||||
expected_rc2);
|
||||
ksft_test_result_fail("%s\n", test_name);
|
||||
return 1;
|
||||
}
|
||||
printf("[OK]\n");
|
||||
ksft_test_result_pass("%s\n", test_name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -129,11 +146,9 @@ static int open_or_die(const char *filename, int flags)
|
||||
{
|
||||
int fd = open(filename, flags);
|
||||
|
||||
if (fd < 0) {
|
||||
printf("Failed to open '%s'; "
|
||||
if (fd < 0)
|
||||
ksft_exit_fail_msg("Failed to open '%s'; "
|
||||
"check prerequisites are available\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
return fd;
|
||||
}
|
||||
|
||||
@ -162,8 +177,7 @@ static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
|
||||
char *cwd = getcwd(NULL, 0);
|
||||
|
||||
if (!cwd) {
|
||||
printf("Failed to getcwd(), errno=%d (%s)\n",
|
||||
errno, strerror(errno));
|
||||
ksft_perror("Failed to getcwd()");
|
||||
return 2;
|
||||
}
|
||||
strcpy(longpath, cwd);
|
||||
@ -193,12 +207,12 @@ static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
|
||||
*/
|
||||
fd = open(longpath, O_RDONLY);
|
||||
if (fd > 0) {
|
||||
printf("Invoke copy of '%s' via filename of length %zu:\n",
|
||||
src, strlen(longpath));
|
||||
ksft_print_msg("Invoke copy of '%s' via filename of length %zu:\n",
|
||||
src, strlen(longpath));
|
||||
fail += check_execveat(fd, "", AT_EMPTY_PATH);
|
||||
} else {
|
||||
printf("Failed to open length %zu filename, errno=%d (%s)\n",
|
||||
strlen(longpath), errno, strerror(errno));
|
||||
ksft_print_msg("Failed to open length %zu filename, errno=%d (%s)\n",
|
||||
strlen(longpath), errno, strerror(errno));
|
||||
fail++;
|
||||
}
|
||||
|
||||
@ -405,28 +419,31 @@ int main(int argc, char **argv)
|
||||
const char *in_test = getenv("IN_TEST");
|
||||
|
||||
if (verbose) {
|
||||
printf(" invoked with:");
|
||||
ksft_print_msg("invoked with:\n");
|
||||
for (ii = 0; ii < argc; ii++)
|
||||
printf(" [%d]='%s'", ii, argv[ii]);
|
||||
printf("\n");
|
||||
ksft_print_msg("\t[%d]='%s\n'", ii, argv[ii]);
|
||||
}
|
||||
|
||||
/* Check expected environment transferred. */
|
||||
if (!in_test || strcmp(in_test, "yes") != 0) {
|
||||
printf("[FAIL] (no IN_TEST=yes in env)\n");
|
||||
ksft_print_msg("no IN_TEST=yes in env\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Use the final argument as an exit code. */
|
||||
rc = atoi(argv[argc - 1]);
|
||||
fflush(stdout);
|
||||
exit(rc);
|
||||
} else {
|
||||
ksft_print_header();
|
||||
ksft_set_plan(TESTS_EXPECTED);
|
||||
prerequisites();
|
||||
if (verbose)
|
||||
envp[1] = "VERBOSE=1";
|
||||
rc = run_tests();
|
||||
if (rc > 0)
|
||||
printf("%d tests failed\n", rc);
|
||||
ksft_finished();
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -17,10 +17,6 @@
|
||||
#include <sys/wait.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#ifndef CLONE_NEWNS
|
||||
# define CLONE_NEWNS 0x00020000
|
||||
#endif
|
||||
|
||||
static char *fw_path = NULL;
|
||||
|
||||
static void die(char *fmt, ...)
|
||||
|
@ -28,6 +28,12 @@ s390*)
|
||||
mips*)
|
||||
ARG1=%r4
|
||||
;;
|
||||
loongarch*)
|
||||
ARG1=%r4
|
||||
;;
|
||||
riscv*)
|
||||
ARG1=%a0
|
||||
;;
|
||||
*)
|
||||
echo "Please implement other architecture here"
|
||||
exit_untested
|
||||
|
@ -31,6 +31,9 @@ mips*)
|
||||
loongarch*)
|
||||
ARG1=%r4
|
||||
;;
|
||||
riscv*)
|
||||
ARG1=%a0
|
||||
;;
|
||||
*)
|
||||
echo "Please implement other architecture here"
|
||||
exit_untested
|
||||
|
@ -44,6 +44,10 @@ loongarch*)
|
||||
GOODREG=%r4
|
||||
BADREG=%r12
|
||||
;;
|
||||
riscv*)
|
||||
GOODREG=%a0
|
||||
BADREG=%a8
|
||||
;;
|
||||
*)
|
||||
echo "Please implement other architecture here"
|
||||
exit_untested
|
||||
|
@@ -48,6 +48,7 @@
 #include <stdlib.h>
 #include <unistd.h>
 #include <stdarg.h>
+#include <string.h>
 #include <stdio.h>
 #endif

@@ -77,6 +78,8 @@
 #define KSFT_XPASS 3
 #define KSFT_SKIP  4

+#define __printf(a, b) __attribute__((format(printf, a, b)))
+
 /* counters */
 struct ksft_count {
 	unsigned int ksft_pass;
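The __printf() macro added above is what lets GCC and Clang type-check the variadic arguments of the ksft_* helpers against their format strings; that checking is also what motivates the %d → %u and %lu → %llu conversions elsewhere in this series. The following standalone sketch is not part of the patch set: demo_print_msg is an invented stand-in for the real helpers, and it assumes a GCC- or Clang-compatible compiler with -Wformat enabled.

/*
 * Minimal sketch (not from this series) of why the format attribute
 * matters: the compiler checks the variadic arguments against the
 * format string, so a mismatch is caught at build time instead of
 * producing misleading TAP output at run time.
 *
 * Build with:  gcc -Wall -Wformat -o printf_attr_demo printf_attr_demo.c
 */
#include <stdarg.h>
#include <stdio.h>

/* Assumption: GCC/Clang; same macro as the one added to kselftest.h. */
#define __printf(a, b) __attribute__((format(printf, a, b)))

/* Same shape as the kselftest print helpers: a thin vprintf() wrapper. */
static __printf(1, 2) void demo_print_msg(const char *msg, ...)
{
	va_list args;

	va_start(args, msg);
	vprintf(msg, args);
	va_end(args);
}

int main(void)
{
	unsigned int tests_planned = 51;

	demo_print_msg("1..%u\n", tests_planned);	/* correct specifier */
	/* demo_print_msg("1..%d\n", 3.0); */		/* would now trigger -Wformat */
	return 0;
}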
@@ -129,7 +132,7 @@ static inline void ksft_print_header(void)
 static inline void ksft_set_plan(unsigned int plan)
 {
 	ksft_plan = plan;
-	printf("1..%d\n", ksft_plan);
+	printf("1..%u\n", ksft_plan);
 }

 static inline void ksft_print_cnts(void)

@@ -137,13 +140,13 @@ static inline void ksft_print_cnts(void)
 	if (ksft_plan != ksft_test_num())
 		printf("# Planned tests != run tests (%u != %u)\n",
 			ksft_plan, ksft_test_num());
-	printf("# Totals: pass:%d fail:%d xfail:%d xpass:%d skip:%d error:%d\n",
+	printf("# Totals: pass:%u fail:%u xfail:%u xpass:%u skip:%u error:%u\n",
 		ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
 		ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
 		ksft_cnt.ksft_xskip, ksft_cnt.ksft_error);
 }

-static inline void ksft_print_msg(const char *msg, ...)
+static inline __printf(1, 2) void ksft_print_msg(const char *msg, ...)
 {
 	int saved_errno = errno;
 	va_list args;

@@ -155,7 +158,20 @@ static inline void ksft_print_msg(const char *msg, ...)
 	va_end(args);
 }

-static inline void ksft_test_result_pass(const char *msg, ...)
+static inline void ksft_perror(const char *msg)
+{
+#ifndef NOLIBC
+	ksft_print_msg("%s: %s (%d)\n", msg, strerror(errno), errno);
+#else
+	/*
+	 * nolibc doesn't provide strerror() and it seems
+	 * inappropriate to add one, just print the errno.
+	 */
+	ksft_print_msg("%s: %d)\n", msg, errno);
+#endif
+}
+
+static inline __printf(1, 2) void ksft_test_result_pass(const char *msg, ...)
 {
 	int saved_errno = errno;
 	va_list args;
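For context, here is a minimal usage sketch of the new ksft_perror() helper. It is not taken from this series: the opened path and the test body are invented for illustration, and it assumes the file lives in a selftest subdirectory so that ../kselftest.h resolves.

/* Hypothetical selftest showing ksft_perror() reporting a failed syscall. */
#include <fcntl.h>
#include <unistd.h>

#include "../kselftest.h"

int main(void)
{
	int fd;

	ksft_print_header();
	ksft_set_plan(1);

	fd = open("/proc/self/status", O_RDONLY);	/* illustrative path */
	if (fd < 0) {
		/* Prints "open /proc/self/status: <strerror(errno)> (<errno>)" */
		ksft_perror("open /proc/self/status");
		ksft_exit_fail_msg("could not open the test file\n");
	}

	ksft_test_result_pass("open() succeeded\n");
	close(fd);
	ksft_finished();
}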
@ -163,13 +179,13 @@ static inline void ksft_test_result_pass(const char *msg, ...)
|
||||
ksft_cnt.ksft_pass++;
|
||||
|
||||
va_start(args, msg);
|
||||
printf("ok %d ", ksft_test_num());
|
||||
printf("ok %u ", ksft_test_num());
|
||||
errno = saved_errno;
|
||||
vprintf(msg, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
static inline void ksft_test_result_fail(const char *msg, ...)
|
||||
static inline __printf(1, 2) void ksft_test_result_fail(const char *msg, ...)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
va_list args;
|
||||
@ -177,7 +193,7 @@ static inline void ksft_test_result_fail(const char *msg, ...)
|
||||
ksft_cnt.ksft_fail++;
|
||||
|
||||
va_start(args, msg);
|
||||
printf("not ok %d ", ksft_test_num());
|
||||
printf("not ok %u ", ksft_test_num());
|
||||
errno = saved_errno;
|
||||
vprintf(msg, args);
|
||||
va_end(args);
|
||||
@ -195,7 +211,7 @@ static inline void ksft_test_result_fail(const char *msg, ...)
|
||||
ksft_test_result_fail(fmt, ##__VA_ARGS__);\
|
||||
} while (0)
|
||||
|
||||
static inline void ksft_test_result_xfail(const char *msg, ...)
|
||||
static inline __printf(1, 2) void ksft_test_result_xfail(const char *msg, ...)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
va_list args;
|
||||
@ -203,13 +219,13 @@ static inline void ksft_test_result_xfail(const char *msg, ...)
|
||||
ksft_cnt.ksft_xfail++;
|
||||
|
||||
va_start(args, msg);
|
||||
printf("ok %d # XFAIL ", ksft_test_num());
|
||||
printf("ok %u # XFAIL ", ksft_test_num());
|
||||
errno = saved_errno;
|
||||
vprintf(msg, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
static inline void ksft_test_result_skip(const char *msg, ...)
|
||||
static inline __printf(1, 2) void ksft_test_result_skip(const char *msg, ...)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
va_list args;
|
||||
@ -217,14 +233,14 @@ static inline void ksft_test_result_skip(const char *msg, ...)
|
||||
ksft_cnt.ksft_xskip++;
|
||||
|
||||
va_start(args, msg);
|
||||
printf("ok %d # SKIP ", ksft_test_num());
|
||||
printf("ok %u # SKIP ", ksft_test_num());
|
||||
errno = saved_errno;
|
||||
vprintf(msg, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
/* TODO: how does "error" differ from "fail" or "skip"? */
|
||||
static inline void ksft_test_result_error(const char *msg, ...)
|
||||
static inline __printf(1, 2) void ksft_test_result_error(const char *msg, ...)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
va_list args;
|
||||
@ -232,7 +248,7 @@ static inline void ksft_test_result_error(const char *msg, ...)
|
||||
ksft_cnt.ksft_error++;
|
||||
|
||||
va_start(args, msg);
|
||||
printf("not ok %d # error ", ksft_test_num());
|
||||
printf("not ok %u # error ", ksft_test_num());
|
||||
errno = saved_errno;
|
||||
vprintf(msg, args);
|
||||
va_end(args);
|
||||
@ -271,7 +287,7 @@ static inline int ksft_exit_fail(void)
|
||||
ksft_cnt.ksft_xfail + \
|
||||
ksft_cnt.ksft_xskip)
|
||||
|
||||
static inline int ksft_exit_fail_msg(const char *msg, ...)
|
||||
static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
va_list args;
|
||||
@ -298,7 +314,7 @@ static inline int ksft_exit_xpass(void)
|
||||
exit(KSFT_XPASS);
|
||||
}
|
||||
|
||||
static inline int ksft_exit_skip(const char *msg, ...)
|
||||
static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
va_list args;
|
||||
|
@ -33,7 +33,7 @@ static inline int _no_printf(const char *format, ...) { return 0; }
|
||||
#define pr_info(...) _no_printf(__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
|
||||
void __printf(1, 2) print_skip(const char *fmt, ...);
|
||||
#define __TEST_REQUIRE(f, fmt, ...) \
|
||||
do { \
|
||||
if (!(f)) \
|
||||
@ -46,9 +46,9 @@ ssize_t test_write(int fd, const void *buf, size_t count);
|
||||
ssize_t test_read(int fd, void *buf, size_t count);
|
||||
int test_seq_read(const char *path, char **bufp, size_t *sizep);
|
||||
|
||||
void test_assert(bool exp, const char *exp_str,
|
||||
const char *file, unsigned int line, const char *fmt, ...)
|
||||
__attribute__((format(printf, 5, 6)));
|
||||
void __printf(5, 6) test_assert(bool exp, const char *exp_str,
|
||||
const char *file, unsigned int line,
|
||||
const char *fmt, ...);
|
||||
|
||||
#define TEST_ASSERT(e, fmt, ...) \
|
||||
test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
|
||||
|
@ -338,7 +338,7 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
|
||||
char c = (char) rand();
|
||||
|
||||
if (((char *) dest_addr)[i] != c) {
|
||||
ksft_print_msg("Data after remap doesn't match at offset %d\n",
|
||||
ksft_print_msg("Data after remap doesn't match at offset %llu\n",
|
||||
i);
|
||||
ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
|
||||
((char *) dest_addr)[i] & 0xff);
|
||||
|
@ -34,7 +34,7 @@ extern int test_nr;
|
||||
extern int iteration_nr;
|
||||
|
||||
#ifdef __GNUC__
|
||||
__attribute__((format(printf, 1, 2)))
|
||||
__printf(1, 2)
|
||||
#endif
|
||||
static inline void sigsafe_printf(const char *format, ...)
|
||||
{
|
||||
|
@ -300,7 +300,7 @@ void test_openat2_flags(void)
|
||||
|
||||
ksft_print_msg("openat2 unexpectedly returned ");
|
||||
if (fdpath)
|
||||
ksft_print_msg("%d['%s'] with %X (!= %X)\n",
|
||||
ksft_print_msg("%d['%s'] with %X (!= %llX)\n",
|
||||
fd, fdpath, fdflags,
|
||||
test->how.flags);
|
||||
else
|
||||
|
@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
|
||||
break;
|
||||
|
||||
case PIDFD_PASS:
|
||||
ksft_test_result_pass("%s test: Passed\n");
|
||||
ksft_test_result_pass("%s test: Passed\n", test_name);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -381,13 +381,13 @@ static int test_pidfd_send_signal_syscall_support(void)
|
||||
|
||||
static void *test_pidfd_poll_exec_thread(void *priv)
|
||||
{
|
||||
ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
|
||||
ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
|
||||
getpid(), syscall(SYS_gettid));
|
||||
ksft_print_msg("Child Thread: doing exec of sleep\n");
|
||||
|
||||
execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
|
||||
|
||||
ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
|
||||
ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
|
||||
getpid(), syscall(SYS_gettid));
|
||||
return NULL;
|
||||
}
|
||||
@ -427,7 +427,7 @@ static int child_poll_exec_test(void *args)
|
||||
{
|
||||
pthread_t t1;
|
||||
|
||||
ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
|
||||
ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
|
||||
syscall(SYS_gettid));
|
||||
pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
|
||||
/*
|
||||
@ -480,10 +480,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
|
||||
|
||||
static void *test_pidfd_poll_leader_exit_thread(void *priv)
|
||||
{
|
||||
ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
|
||||
ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
|
||||
getpid(), syscall(SYS_gettid));
|
||||
sleep(CHILD_THREAD_MIN_WAIT);
|
||||
ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
|
||||
ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -492,7 +492,7 @@ static int child_poll_leader_exit_test(void *args)
|
||||
{
|
||||
pthread_t t1, t2;
|
||||
|
||||
ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
|
||||
ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
|
||||
pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
|
||||
pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
|
||||
CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
|
||||
CFLAGS += $(KHDR_INCLUDES)
|
||||
|
||||
TEST_GEN_PROGS := resctrl_tests
|
||||
|
@ -205,10 +205,11 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
|
||||
* cache_val: execute benchmark and measure LLC occupancy resctrl
|
||||
* and perf cache miss for the benchmark
|
||||
* @param: parameters passed to cache_val()
|
||||
* @span: buffer size for the benchmark
|
||||
*
|
||||
* Return: 0 on success. non-zero on failure.
|
||||
*/
|
||||
int cat_val(struct resctrl_val_param *param)
|
||||
int cat_val(struct resctrl_val_param *param, size_t span)
|
||||
{
|
||||
int memflush = 1, operation = 0, ret = 0;
|
||||
char *resctrl_val = param->resctrl_val;
|
||||
@ -245,7 +246,7 @@ int cat_val(struct resctrl_val_param *param)
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (run_fill_buf(param->span, memflush, operation, true)) {
|
||||
if (run_fill_buf(span, memflush, operation, true)) {
|
||||
fprintf(stderr, "Error-running fill buffer\n");
|
||||
ret = -1;
|
||||
goto pe_close;
|
||||
@ -294,7 +295,7 @@ int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
|
||||
ret = platform && abs((int)diff_percent) > max_diff_percent &&
|
||||
(cmt ? (abs(avg_diff) > max_diff) : true);
|
||||
|
||||
ksft_print_msg("%s Check cache miss rate within %d%%\n",
|
||||
ksft_print_msg("%s Check cache miss rate within %lu%%\n",
|
||||
ret ? "Fail:" : "Pass:", max_diff_percent);
|
||||
|
||||
ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
|
||||
|
@ -41,7 +41,7 @@ static int cat_setup(struct resctrl_val_param *p)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int check_results(struct resctrl_val_param *param)
|
||||
static int check_results(struct resctrl_val_param *param, size_t span)
|
||||
{
|
||||
char *token_array[8], temp[512];
|
||||
unsigned long sum_llc_perf_miss = 0;
|
||||
@ -76,7 +76,7 @@ static int check_results(struct resctrl_val_param *param)
|
||||
fclose(fp);
|
||||
no_of_bits = count_bits(param->mask);
|
||||
|
||||
return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
|
||||
return show_cache_info(sum_llc_perf_miss, no_of_bits, span / 64,
|
||||
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
|
||||
get_vendor() == ARCH_INTEL, false);
|
||||
}
|
||||
@ -96,6 +96,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
|
||||
char cbm_mask[256];
|
||||
int count_of_bits;
|
||||
char pipe_message;
|
||||
size_t span;
|
||||
|
||||
/* Get default cbm mask for L3/L2 cache */
|
||||
ret = get_cbm_mask(cache_type, cbm_mask);
|
||||
@ -140,7 +141,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
|
||||
/* Set param values for parent thread which will be allocated bitmask
|
||||
* with (max_bits - n) bits
|
||||
*/
|
||||
param.span = cache_size * (count_of_bits - n) / count_of_bits;
|
||||
span = cache_size * (count_of_bits - n) / count_of_bits;
|
||||
strcpy(param.ctrlgrp, "c2");
|
||||
strcpy(param.mongrp, "m2");
|
||||
strcpy(param.filename, RESULT_FILE_NAME2);
|
||||
@ -162,23 +163,17 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
|
||||
param.mask = l_mask_1;
|
||||
strcpy(param.ctrlgrp, "c1");
|
||||
strcpy(param.mongrp, "m1");
|
||||
param.span = cache_size * n / count_of_bits;
|
||||
span = cache_size * n / count_of_bits;
|
||||
strcpy(param.filename, RESULT_FILE_NAME1);
|
||||
param.num_of_runs = 0;
|
||||
param.cpu_no = sibling_cpu_no;
|
||||
} else {
|
||||
ret = signal_handler_register();
|
||||
if (ret) {
|
||||
kill(bm_pid, SIGKILL);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
remove(param.filename);
|
||||
|
||||
ret = cat_val(¶m);
|
||||
ret = cat_val(¶m, span);
|
||||
if (ret == 0)
|
||||
ret = check_results(¶m);
|
||||
ret = check_results(¶m, span);
|
||||
|
||||
if (bm_pid == 0) {
|
||||
/* Tell parent that child is ready */
|
||||
@ -208,10 +203,8 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
|
||||
}
|
||||
close(pipefd[0]);
|
||||
kill(bm_pid, SIGKILL);
|
||||
signal_handler_unregister();
|
||||
}
|
||||
|
||||
out:
|
||||
cat_test_cleanup();
|
||||
|
||||
return ret;
|
||||
|
@ -27,7 +27,7 @@ static int cmt_setup(struct resctrl_val_param *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_results(struct resctrl_val_param *param, int no_of_bits)
|
||||
static int check_results(struct resctrl_val_param *param, size_t span, int no_of_bits)
|
||||
{
|
||||
char *token_array[8], temp[512];
|
||||
unsigned long sum_llc_occu_resc = 0;
|
||||
@ -58,7 +58,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
|
||||
}
|
||||
fclose(fp);
|
||||
|
||||
return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
|
||||
return show_cache_info(sum_llc_occu_resc, no_of_bits, span,
|
||||
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
|
||||
true, true);
|
||||
}
|
||||
@ -68,16 +68,17 @@ void cmt_test_cleanup(void)
|
||||
remove(RESULT_FILE_NAME);
|
||||
}
|
||||
|
||||
int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
|
||||
int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
|
||||
{
|
||||
const char * const *cmd = benchmark_cmd;
|
||||
const char *new_cmd[BENCHMARK_ARGS];
|
||||
unsigned long cache_size = 0;
|
||||
unsigned long long_mask;
|
||||
char *span_str = NULL;
|
||||
char cbm_mask[256];
|
||||
int count_of_bits;
|
||||
int ret;
|
||||
|
||||
if (!validate_resctrl_feature_request(CMT_STR))
|
||||
return -1;
|
||||
size_t span;
|
||||
int ret, i;
|
||||
|
||||
ret = get_cbm_mask("L3", cbm_mask);
|
||||
if (ret)
|
||||
@ -105,24 +106,36 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
|
||||
.cpu_no = cpu_no,
|
||||
.filename = RESULT_FILE_NAME,
|
||||
.mask = ~(long_mask << n) & long_mask,
|
||||
.span = cache_size * n / count_of_bits,
|
||||
.num_of_runs = 0,
|
||||
.setup = cmt_setup,
|
||||
};
|
||||
|
||||
if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
|
||||
sprintf(benchmark_cmd[1], "%zu", param.span);
|
||||
span = cache_size * n / count_of_bits;
|
||||
|
||||
if (strcmp(cmd[0], "fill_buf") == 0) {
|
||||
/* Duplicate the command to be able to replace span in it */
|
||||
for (i = 0; benchmark_cmd[i]; i++)
|
||||
new_cmd[i] = benchmark_cmd[i];
|
||||
new_cmd[i] = NULL;
|
||||
|
||||
ret = asprintf(&span_str, "%zu", span);
|
||||
if (ret < 0)
|
||||
return -1;
|
||||
new_cmd[1] = span_str;
|
||||
cmd = new_cmd;
|
||||
}
|
||||
|
||||
remove(RESULT_FILE_NAME);
|
||||
|
||||
ret = resctrl_val(benchmark_cmd, ¶m);
|
||||
ret = resctrl_val(cmd, ¶m);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = check_results(¶m, n);
|
||||
ret = check_results(¶m, span, n);
|
||||
|
||||
out:
|
||||
cmt_test_cleanup();
|
||||
free(span_str);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -12,7 +12,7 @@
|
||||
|
||||
#define RESULT_FILE_NAME "result_mba"
|
||||
#define NUM_OF_RUNS 5
|
||||
#define MAX_DIFF_PERCENT 5
|
||||
#define MAX_DIFF_PERCENT 8
|
||||
#define ALLOCATION_MAX 100
|
||||
#define ALLOCATION_MIN 10
|
||||
#define ALLOCATION_STEP 10
|
||||
@ -141,7 +141,7 @@ void mba_test_cleanup(void)
|
||||
remove(RESULT_FILE_NAME);
|
||||
}
|
||||
|
||||
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
|
||||
int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
|
||||
{
|
||||
struct resctrl_val_param param = {
|
||||
.resctrl_val = MBA_STR,
|
||||
@ -149,7 +149,7 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
|
||||
.mongrp = "m1",
|
||||
.cpu_no = cpu_no,
|
||||
.filename = RESULT_FILE_NAME,
|
||||
.bw_report = bw_report,
|
||||
.bw_report = "reads",
|
||||
.setup = mba_setup
|
||||
};
|
||||
int ret;
|
||||
|
@ -11,7 +11,7 @@
|
||||
#include "resctrl.h"
|
||||
|
||||
#define RESULT_FILE_NAME "result_mbm"
|
||||
#define MAX_DIFF_PERCENT 5
|
||||
#define MAX_DIFF_PERCENT 8
|
||||
#define NUM_OF_RUNS 5
|
||||
|
||||
static int
|
||||
@ -95,7 +95,7 @@ static int mbm_setup(struct resctrl_val_param *p)
|
||||
return END_OF_TESTS;
|
||||
|
||||
/* Set up shemata with 100% allocation on the first run. */
|
||||
if (p->num_of_runs == 0)
|
||||
if (p->num_of_runs == 0 && validate_resctrl_feature_request("MB", NULL))
|
||||
ret = write_schemata(p->ctrlgrp, "100", p->cpu_no,
|
||||
p->resctrl_val);
|
||||
|
||||
@ -109,16 +109,15 @@ void mbm_test_cleanup(void)
|
||||
remove(RESULT_FILE_NAME);
|
||||
}
|
||||
|
||||
int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
|
||||
int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd)
|
||||
{
|
||||
struct resctrl_val_param param = {
|
||||
.resctrl_val = MBM_STR,
|
||||
.ctrlgrp = "c1",
|
||||
.mongrp = "m1",
|
||||
.span = span,
|
||||
.cpu_no = cpu_no,
|
||||
.filename = RESULT_FILE_NAME,
|
||||
.bw_report = bw_report,
|
||||
.bw_report = "reads",
|
||||
.setup = mbm_setup
|
||||
};
|
||||
int ret;
|
||||
@ -129,7 +128,7 @@ int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = check_results(span);
|
||||
ret = check_results(DEFAULT_SPAN);
|
||||
|
||||
out:
|
||||
mbm_test_cleanup();
|
||||
|
@ -1,5 +1,4 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#define _GNU_SOURCE
|
||||
#ifndef RESCTRL_H
|
||||
#define RESCTRL_H
|
||||
#include <stdio.h>
|
||||
@ -28,16 +27,16 @@
|
||||
#define RESCTRL_PATH "/sys/fs/resctrl"
|
||||
#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
|
||||
#define INFO_PATH "/sys/fs/resctrl/info"
|
||||
#define L3_PATH "/sys/fs/resctrl/info/L3"
|
||||
#define MB_PATH "/sys/fs/resctrl/info/MB"
|
||||
#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
|
||||
#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
|
||||
|
||||
#define ARCH_INTEL 1
|
||||
#define ARCH_AMD 2
|
||||
|
||||
#define END_OF_TESTS 1
|
||||
|
||||
#define BENCHMARK_ARGS 64
|
||||
|
||||
#define DEFAULT_SPAN (250 * MB)
|
||||
|
||||
#define PARENT_EXIT(err_msg) \
|
||||
do { \
|
||||
perror(err_msg); \
|
||||
@ -52,7 +51,6 @@
|
||||
* @ctrlgrp: Name of the control monitor group (con_mon grp)
|
||||
* @mongrp: Name of the monitor group (mon grp)
|
||||
* @cpu_no: CPU number to which the benchmark would be binded
|
||||
* @span: Memory bytes accessed in each benchmark iteration
|
||||
* @filename: Name of file to which the o/p should be written
|
||||
* @bw_report: Bandwidth report type (reads vs writes)
|
||||
* @setup: Call back function to setup test environment
|
||||
@ -62,7 +60,6 @@ struct resctrl_val_param {
|
||||
char ctrlgrp[64];
|
||||
char mongrp[64];
|
||||
int cpu_no;
|
||||
size_t span;
|
||||
char filename[64];
|
||||
char *bw_report;
|
||||
unsigned long mask;
|
||||
@@ -86,10 +83,9 @@ int get_resource_id(int cpu_no, int *resource_id);
int mount_resctrlfs(void);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
bool validate_resctrl_feature_request(const char *resctrl_val);
bool validate_resctrl_feature_request(const char *resource, const char *feature);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no);
void run_benchmark(int signum, siginfo_t *info, void *ucontext);
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
		   char *resctrl_val);
int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
@@ -97,21 +93,21 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
		    int group_fd, unsigned long flags);
int run_fill_buf(size_t span, int memflush, int op, bool once);
int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd);
int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param);
int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd);
void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd);
void mba_test_cleanup(void);
int get_cbm_mask(char *cache_type, char *cbm_mask);
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
int signal_handler_register(void);
void signal_handler_unregister(void);
int cat_val(struct resctrl_val_param *param);
int cat_val(struct resctrl_val_param *param, size_t span);
void cat_test_cleanup(void);
int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd);
unsigned int count_bits(unsigned long n);
void cmt_test_cleanup(void);
int get_core_sibling(int cpu_no);
|
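The prototype changes above replace the mutable char ** benchmark command with a NULL-terminated const char * const * vector, so callers can pass string literals and argv slices directly instead of copying them into fixed-size buffers. A minimal standalone sketch of that convention (not part of the kernel tree; names and values are illustrative):

#include <stdio.h>

/*
 * The consumer only reads the NULL-terminated vector, so it takes
 * "const char * const *": neither the pointers nor the strings may be
 * modified through this parameter.
 */
static void print_cmd(const char * const *cmd)
{
	for (int i = 0; cmd[i]; i++)
		printf("arg[%d] = %s\n", i, cmd[i]);
}

int main(void)
{
	/* String literals (or argv elements) are used as-is, no copy buffers. */
	const char *benchmark_cmd[] = {
		"fill_buf", "262144000", "1", "0", "false", NULL
	};

	print_cmd(benchmark_cmd);
	return 0;
}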
@ -10,9 +10,6 @@
|
||||
*/
|
||||
#include "resctrl.h"
|
||||
|
||||
#define BENCHMARK_ARGS 64
|
||||
#define BENCHMARK_ARG_SIZE 64
|
||||
|
||||
static int detect_vendor(void)
|
||||
{
|
||||
FILE *inf = fopen("/proc/cpuinfo", "r");
|
||||
@ -52,8 +49,8 @@ int get_vendor(void)
|
||||
|
||||
static void cmd_help(void)
|
||||
{
|
||||
printf("usage: resctrl_tests [-h] [-b \"benchmark_cmd [options]\"] [-t test list] [-n no_of_bits]\n");
|
||||
printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT\n");
|
||||
printf("usage: resctrl_tests [-h] [-t test list] [-n no_of_bits] [-b benchmark_cmd [option]...]\n");
|
||||
printf("\t-b benchmark_cmd [option]...: run specified benchmark for MBM, MBA and CMT\n");
|
||||
printf("\t default benchmark is builtin fill_buf\n");
|
||||
printf("\t-t test list: run tests specified in the test list, ");
|
||||
printf("e.g. -t mbm,mba,cmt,cat\n");
|
||||
@ -70,72 +67,98 @@ void tests_cleanup(void)
|
||||
cat_test_cleanup();
|
||||
}
|
||||
|
||||
static void run_mbm_test(char **benchmark_cmd, size_t span,
|
||||
int cpu_no, char *bw_report)
|
||||
static int test_prepare(void)
|
||||
{
|
||||
int res;
|
||||
|
||||
res = signal_handler_register();
|
||||
if (res) {
|
||||
ksft_print_msg("Failed to register signal handler\n");
|
||||
return res;
|
||||
}
|
||||
|
||||
res = mount_resctrlfs();
|
||||
if (res) {
|
||||
signal_handler_unregister();
|
||||
ksft_print_msg("Failed to mount resctrl FS\n");
|
||||
return res;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void test_cleanup(void)
|
||||
{
|
||||
umount_resctrlfs();
|
||||
signal_handler_unregister();
|
||||
}
|
||||
|
||||
static void run_mbm_test(const char * const *benchmark_cmd, int cpu_no)
|
||||
{
|
||||
int res;
|
||||
|
||||
ksft_print_msg("Starting MBM BW change ...\n");
|
||||
|
||||
res = mount_resctrlfs();
|
||||
if (res) {
|
||||
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
|
||||
if (test_prepare()) {
|
||||
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
|
||||
if (!validate_resctrl_feature_request("L3_MON", "mbm_total_bytes") ||
|
||||
!validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
|
||||
(get_vendor() != ARCH_INTEL)) {
|
||||
ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
|
||||
goto umount;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
|
||||
res = mbm_bw_change(cpu_no, benchmark_cmd);
|
||||
ksft_test_result(!res, "MBM: bw change\n");
|
||||
if ((get_vendor() == ARCH_INTEL) && res)
|
||||
ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
|
||||
|
||||
umount:
|
||||
umount_resctrlfs();
|
||||
cleanup:
|
||||
test_cleanup();
|
||||
}
|
||||
|
||||
static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
|
||||
static void run_mba_test(const char * const *benchmark_cmd, int cpu_no)
|
||||
{
|
||||
int res;
|
||||
|
||||
ksft_print_msg("Starting MBA Schemata change ...\n");
|
||||
|
||||
res = mount_resctrlfs();
|
||||
if (res) {
|
||||
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
|
||||
if (test_prepare()) {
|
||||
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
|
||||
if (!validate_resctrl_feature_request("MB", NULL) ||
|
||||
!validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
|
||||
(get_vendor() != ARCH_INTEL)) {
|
||||
ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
|
||||
goto umount;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
|
||||
res = mba_schemata_change(cpu_no, benchmark_cmd);
|
||||
ksft_test_result(!res, "MBA: schemata change\n");
|
||||
|
||||
umount:
|
||||
umount_resctrlfs();
|
||||
cleanup:
|
||||
test_cleanup();
|
||||
}
|
||||
|
||||
static void run_cmt_test(char **benchmark_cmd, int cpu_no)
|
||||
static void run_cmt_test(const char * const *benchmark_cmd, int cpu_no)
|
||||
{
|
||||
int res;
|
||||
|
||||
ksft_print_msg("Starting CMT test ...\n");
|
||||
|
||||
res = mount_resctrlfs();
|
||||
if (res) {
|
||||
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
|
||||
if (test_prepare()) {
|
||||
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!validate_resctrl_feature_request(CMT_STR)) {
|
||||
if (!validate_resctrl_feature_request("L3_MON", "llc_occupancy") ||
|
||||
!validate_resctrl_feature_request("L3", NULL)) {
|
||||
ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
|
||||
goto umount;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
|
||||
@ -143,8 +166,8 @@ static void run_cmt_test(char **benchmark_cmd, int cpu_no)
|
||||
if ((get_vendor() == ARCH_INTEL) && res)
|
||||
ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
|
||||
|
||||
umount:
|
||||
umount_resctrlfs();
|
||||
cleanup:
|
||||
test_cleanup();
|
||||
}
|
||||
|
||||
static void run_cat_test(int cpu_no, int no_of_bits)
|
||||
@ -153,48 +176,53 @@ static void run_cat_test(int cpu_no, int no_of_bits)
|
||||
|
||||
ksft_print_msg("Starting CAT test ...\n");
|
||||
|
||||
res = mount_resctrlfs();
|
||||
if (res) {
|
||||
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
|
||||
if (test_prepare()) {
|
||||
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!validate_resctrl_feature_request(CAT_STR)) {
|
||||
if (!validate_resctrl_feature_request("L3", NULL)) {
|
||||
ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
|
||||
goto umount;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
|
||||
ksft_test_result(!res, "CAT: test\n");
|
||||
|
||||
umount:
|
||||
umount_resctrlfs();
|
||||
cleanup:
|
||||
test_cleanup();
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
|
||||
char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
|
||||
char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
|
||||
int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
|
||||
int ben_ind, ben_count, tests = 0;
|
||||
size_t span = 250 * MB;
|
||||
bool mbm_test = true, mba_test = true, cmt_test = true;
|
||||
const char *benchmark_cmd[BENCHMARK_ARGS] = {};
|
||||
int c, cpu_no = 1, i, no_of_bits = 0;
|
||||
char *span_str = NULL;
|
||||
bool cat_test = true;
|
||||
int tests = 0;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < argc; i++) {
|
||||
if (strcmp(argv[i], "-b") == 0) {
|
||||
ben_ind = i + 1;
|
||||
ben_count = argc - ben_ind;
|
||||
argc_new = ben_ind - 1;
|
||||
has_ben = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
|
||||
while ((c = getopt(argc, argv, "ht:b:n:p:")) != -1) {
|
||||
char *token;
|
||||
|
||||
switch (c) {
|
||||
case 'b':
|
||||
/*
|
||||
* First move optind back to the (first) optarg and
|
||||
* then build the benchmark command using the
|
||||
* remaining arguments.
|
||||
*/
|
||||
optind--;
|
||||
if (argc - optind >= BENCHMARK_ARGS)
|
||||
ksft_exit_fail_msg("Too long benchmark command");
|
||||
|
||||
/* Extract benchmark command from command line. */
|
||||
for (i = 0; i < argc - optind; i++)
|
||||
benchmark_cmd[i] = argv[i + optind];
|
||||
benchmark_cmd[i] = NULL;
|
||||
|
||||
goto last_arg;
|
||||
case 't':
|
||||
token = strtok(optarg, ",");
|
||||
|
||||
@ -243,6 +271,7 @@ int main(int argc, char **argv)
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
last_arg:
|
||||
|
||||
ksft_print_header();
|
||||
|
||||
@ -254,29 +283,6 @@ int main(int argc, char **argv)
|
||||
if (geteuid() != 0)
|
||||
return ksft_exit_skip("Not running as root. Skipping...\n");
|
||||
|
||||
if (has_ben) {
|
||||
/* Extract benchmark command from command line. */
|
||||
for (i = ben_ind; i < argc; i++) {
|
||||
benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
|
||||
sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
|
||||
}
|
||||
benchmark_cmd[ben_count] = NULL;
|
||||
} else {
|
||||
/* If no benchmark is given by "-b" argument, use fill_buf. */
|
||||
for (i = 0; i < 5; i++)
|
||||
benchmark_cmd[i] = benchmark_cmd_area[i];
|
||||
|
||||
strcpy(benchmark_cmd[0], "fill_buf");
|
||||
sprintf(benchmark_cmd[1], "%zu", span);
|
||||
strcpy(benchmark_cmd[2], "1");
|
||||
strcpy(benchmark_cmd[3], "0");
|
||||
strcpy(benchmark_cmd[4], "false");
|
||||
benchmark_cmd[5] = NULL;
|
||||
}
|
||||
|
||||
sprintf(bw_report, "reads");
|
||||
sprintf(bm_type, "fill_buf");
|
||||
|
||||
if (!check_resctrlfs_support())
|
||||
return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
|
||||
|
||||
@ -285,13 +291,26 @@ int main(int argc, char **argv)
|
||||
|
||||
filter_dmesg();
|
||||
|
||||
if (!benchmark_cmd[0]) {
|
||||
/* If no benchmark is given by "-b" argument, use fill_buf. */
|
||||
benchmark_cmd[0] = "fill_buf";
|
||||
ret = asprintf(&span_str, "%u", DEFAULT_SPAN);
|
||||
if (ret < 0)
|
||||
ksft_exit_fail_msg("Out of memory!\n");
|
||||
benchmark_cmd[1] = span_str;
|
||||
benchmark_cmd[2] = "1";
|
||||
benchmark_cmd[3] = "0";
|
||||
benchmark_cmd[4] = "false";
|
||||
benchmark_cmd[5] = NULL;
|
||||
}
|
||||
|
||||
ksft_set_plan(tests ? : 4);
|
||||
|
||||
if (mbm_test)
|
||||
run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
|
||||
run_mbm_test(benchmark_cmd, cpu_no);
|
||||
|
||||
if (mba_test)
|
||||
run_mba_test(benchmark_cmd, cpu_no, bw_report);
|
||||
run_mba_test(benchmark_cmd, cpu_no);
|
||||
|
||||
if (cmt_test)
|
||||
run_cmt_test(benchmark_cmd, cpu_no);
|
||||
@ -299,5 +318,6 @@ int main(int argc, char **argv)
|
||||
if (cat_test)
|
||||
run_cat_test(cpu_no, no_of_bits);
|
||||
|
||||
free(span_str);
|
||||
ksft_finished();
|
||||
}
|
||||
|
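The reworked option parsing above consumes everything after -b as the benchmark command by winding optind back to the first argument of -b and taking the remaining argv entries verbatim. A standalone sketch of the same getopt pattern (illustrative only, not the kernel file itself):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BENCHMARK_ARGS 64

int main(int argc, char **argv)
{
	const char *benchmark_cmd[BENCHMARK_ARGS] = { NULL };
	int c, i;

	while ((c = getopt(argc, argv, "hb:t:")) != -1) {
		switch (c) {
		case 'b':
			/*
			 * Step back to the first argument after -b and treat
			 * the rest of the command line as the benchmark
			 * command, argv-style.
			 */
			optind--;
			if (argc - optind >= BENCHMARK_ARGS) {
				fprintf(stderr, "benchmark command too long\n");
				return 1;
			}
			for (i = 0; i < argc - optind; i++)
				benchmark_cmd[i] = argv[i + optind];
			benchmark_cmd[i] = NULL;
			goto done;
		case 't':
			printf("test list: %s\n", optarg);
			break;
		case 'h':
		default:
			printf("usage: %s [-h] [-t list] [-b cmd [arg]...]\n", argv[0]);
			return 0;
		}
	}
done:
	for (i = 0; benchmark_cmd[i]; i++)
		printf("benchmark arg %d: %s\n", i, benchmark_cmd[i]);
	return 0;
}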
@ -468,7 +468,9 @@ pid_t bm_pid, ppid;
|
||||
|
||||
void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
|
||||
{
|
||||
kill(bm_pid, SIGKILL);
|
||||
/* Only kill child after bm_pid is set after fork() */
|
||||
if (bm_pid)
|
||||
kill(bm_pid, SIGKILL);
|
||||
umount_resctrlfs();
|
||||
tests_cleanup();
|
||||
ksft_print_msg("Ending\n\n");
|
||||
@ -482,9 +484,11 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
|
||||
*/
|
||||
int signal_handler_register(void)
|
||||
{
|
||||
struct sigaction sigact;
|
||||
struct sigaction sigact = {};
|
||||
int ret = 0;
|
||||
|
||||
bm_pid = 0;
|
||||
|
||||
sigact.sa_sigaction = ctrlc_handler;
|
||||
sigemptyset(&sigact.sa_mask);
|
||||
sigact.sa_flags = SA_SIGINFO;
|
||||
@ -504,7 +508,7 @@ int signal_handler_register(void)
|
||||
*/
|
||||
void signal_handler_unregister(void)
|
||||
{
|
||||
struct sigaction sigact;
|
||||
struct sigaction sigact = {};
|
||||
|
||||
sigact.sa_handler = SIG_DFL;
|
||||
sigemptyset(&sigact.sa_mask);
|
||||
@ -621,6 +625,56 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
|
||||
* in specified signal. Direct benchmark stdio to /dev/null.
|
||||
* @signum: signal number
|
||||
* @info: signal info
|
||||
* @ucontext: user context in signal handling
|
||||
*/
|
||||
static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
|
||||
{
|
||||
int operation, ret, memflush;
|
||||
char **benchmark_cmd;
|
||||
size_t span;
|
||||
bool once;
|
||||
FILE *fp;
|
||||
|
||||
benchmark_cmd = info->si_ptr;
|
||||
|
||||
/*
|
||||
* Direct stdio of child to /dev/null, so that only parent writes to
|
||||
* stdio (console)
|
||||
*/
|
||||
fp = freopen("/dev/null", "w", stdout);
|
||||
if (!fp)
|
||||
PARENT_EXIT("Unable to direct benchmark status to /dev/null");
|
||||
|
||||
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
|
||||
/* Execute default fill_buf benchmark */
|
||||
span = strtoul(benchmark_cmd[1], NULL, 10);
|
||||
memflush = atoi(benchmark_cmd[2]);
|
||||
operation = atoi(benchmark_cmd[3]);
|
||||
if (!strcmp(benchmark_cmd[4], "true"))
|
||||
once = true;
|
||||
else if (!strcmp(benchmark_cmd[4], "false"))
|
||||
once = false;
|
||||
else
|
||||
PARENT_EXIT("Invalid once parameter");
|
||||
|
||||
if (run_fill_buf(span, memflush, operation, once))
|
||||
fprintf(stderr, "Error in running fill buffer\n");
|
||||
} else {
|
||||
/* Execute specified benchmark */
|
||||
ret = execvp(benchmark_cmd[0], benchmark_cmd);
|
||||
if (ret)
|
||||
perror("wrong\n");
|
||||
}
|
||||
|
||||
fclose(stdout);
|
||||
PARENT_EXIT("Unable to run specified benchmark");
|
||||
}
|
||||
|
||||
/*
|
||||
* resctrl_val: execute benchmark and measure memory bandwidth on
|
||||
* the benchmark
|
||||
@ -629,7 +683,7 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
|
||||
*
|
||||
* Return: 0 on success. non-zero on failure.
|
||||
*/
|
||||
int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
||||
int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param)
|
||||
{
|
||||
char *resctrl_val = param->resctrl_val;
|
||||
unsigned long bw_resc_start = 0;
|
||||
@ -706,28 +760,30 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
||||
|
||||
ksft_print_msg("Benchmark PID: %d\n", bm_pid);
|
||||
|
||||
ret = signal_handler_register();
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
value.sival_ptr = benchmark_cmd;
|
||||
/*
|
||||
* The cast removes constness but nothing mutates benchmark_cmd within
|
||||
* the context of this process. At the receiving process, it becomes
|
||||
* argv, which is mutable, on exec() but that's after fork() so it
|
||||
* doesn't matter for the process running the tests.
|
||||
*/
|
||||
value.sival_ptr = (void *)benchmark_cmd;
|
||||
|
||||
/* Taskset benchmark to specified cpu */
|
||||
ret = taskset_benchmark(bm_pid, param->cpu_no);
|
||||
if (ret)
|
||||
goto unregister;
|
||||
goto out;
|
||||
|
||||
/* Write benchmark to specified control&monitoring grp in resctrl FS */
|
||||
ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
|
||||
resctrl_val);
|
||||
if (ret)
|
||||
goto unregister;
|
||||
goto out;
|
||||
|
||||
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
|
||||
!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
|
||||
ret = initialize_mem_bw_imc();
|
||||
if (ret)
|
||||
goto unregister;
|
||||
goto out;
|
||||
|
||||
initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
|
||||
param->cpu_no, resctrl_val);
|
||||
@ -742,7 +798,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
||||
sizeof(pipe_message)) {
|
||||
perror("# failed reading message from child process");
|
||||
close(pipefd[0]);
|
||||
goto unregister;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
close(pipefd[0]);
|
||||
@ -751,7 +807,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
||||
if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
|
||||
perror("# sigqueue SIGUSR1 to child");
|
||||
ret = errno;
|
||||
goto unregister;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Give benchmark enough time to fully run */
|
||||
@ -780,8 +836,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
||||
}
|
||||
}
|
||||
|
||||
unregister:
|
||||
signal_handler_unregister();
|
||||
out:
|
||||
kill(bm_pid, SIGKILL);
|
||||
|
||||
|
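resctrl_val() above hands the benchmark command to the child through sigqueue(): the pointer travels in value.sival_ptr and arrives in the child's SIGUSR1 handler as info->si_ptr, which works because the child inherits the parent's address-space layout from fork(). A standalone sketch of that mechanism, with an illustrative payload instead of a benchmark command:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static void payload_handler(int signum, siginfo_t *info, void *ctx)
{
	const char *msg = info->si_ptr;	/* pointer queued by the parent */

	(void)signum;
	(void)ctx;
	write(STDOUT_FILENO, msg, strlen(msg));
	_exit(0);
}

int main(void)
{
	static const char payload[] = "hello from sigqueue()\n";
	struct sigaction sigact = {};
	union sigval value;
	pid_t child;

	sigact.sa_sigaction = payload_handler;
	sigemptyset(&sigact.sa_mask);
	sigact.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sigact, NULL);	/* installed before fork(), inherited */

	child = fork();
	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0) {	/* child: wait for SIGUSR1 */
		pause();
		_exit(1);
	}

	value.sival_ptr = (void *)payload;	/* same address is valid in the child */
	if (sigqueue(child, SIGUSR1, value) == -1) {
		perror("sigqueue");
		return 1;
	}
	waitpid(child, NULL, 0);
	return 0;
}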
@ -8,6 +8,9 @@
|
||||
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
|
||||
* Fenghua Yu <fenghua.yu@intel.com>
|
||||
*/
|
||||
#include <fcntl.h>
|
||||
#include <limits.h>
|
||||
|
||||
#include "resctrl.h"
|
||||
|
||||
static int find_resctrl_mount(char *buffer)
|
||||
@ -291,58 +294,6 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
|
||||
* in specified signal. Direct benchmark stdio to /dev/null.
|
||||
* @signum: signal number
|
||||
* @info: signal info
|
||||
* @ucontext: user context in signal handling
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void run_benchmark(int signum, siginfo_t *info, void *ucontext)
|
||||
{
|
||||
int operation, ret, memflush;
|
||||
char **benchmark_cmd;
|
||||
size_t span;
|
||||
bool once;
|
||||
FILE *fp;
|
||||
|
||||
benchmark_cmd = info->si_ptr;
|
||||
|
||||
/*
|
||||
* Direct stdio of child to /dev/null, so that only parent writes to
|
||||
* stdio (console)
|
||||
*/
|
||||
fp = freopen("/dev/null", "w", stdout);
|
||||
if (!fp)
|
||||
PARENT_EXIT("Unable to direct benchmark status to /dev/null");
|
||||
|
||||
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
|
||||
/* Execute default fill_buf benchmark */
|
||||
span = strtoul(benchmark_cmd[1], NULL, 10);
|
||||
memflush = atoi(benchmark_cmd[2]);
|
||||
operation = atoi(benchmark_cmd[3]);
|
||||
if (!strcmp(benchmark_cmd[4], "true"))
|
||||
once = true;
|
||||
else if (!strcmp(benchmark_cmd[4], "false"))
|
||||
once = false;
|
||||
else
|
||||
PARENT_EXIT("Invalid once parameter");
|
||||
|
||||
if (run_fill_buf(span, memflush, operation, once))
|
||||
fprintf(stderr, "Error in running fill buffer\n");
|
||||
} else {
|
||||
/* Execute specified benchmark */
|
||||
ret = execvp(benchmark_cmd[0], benchmark_cmd);
|
||||
if (ret)
|
||||
perror("wrong\n");
|
||||
}
|
||||
|
||||
fclose(stdout);
|
||||
PARENT_EXIT("Unable to run specified benchmark");
|
||||
}
|
||||
|
||||
/*
|
||||
* create_grp - Create a group only if one doesn't exist
|
||||
* @grp_name: Name of the group
|
||||
@ -488,9 +439,8 @@ out:
|
||||
*/
|
||||
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
|
||||
{
|
||||
char controlgroup[1024], schema[1024], reason[64];
|
||||
int resource_id, ret = 0;
|
||||
FILE *fp;
|
||||
char controlgroup[1024], reason[128], schema[1024] = {};
|
||||
int resource_id, fd, schema_len = -1, ret = 0;
|
||||
|
||||
if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
|
||||
strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) &&
|
||||
@ -518,28 +468,39 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
|
||||
|
||||
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
|
||||
!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
|
||||
sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
|
||||
schema_len = snprintf(schema, sizeof(schema), "%s%d%c%s\n",
|
||||
"L3:", resource_id, '=', schemata);
|
||||
if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
|
||||
!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
|
||||
sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
|
||||
|
||||
fp = fopen(controlgroup, "w");
|
||||
if (!fp) {
|
||||
sprintf(reason, "Failed to open control group");
|
||||
schema_len = snprintf(schema, sizeof(schema), "%s%d%c%s\n",
|
||||
"MB:", resource_id, '=', schemata);
|
||||
if (schema_len < 0 || schema_len >= sizeof(schema)) {
|
||||
snprintf(reason, sizeof(reason),
|
||||
"snprintf() failed with return value : %d", schema_len);
|
||||
ret = -1;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (fprintf(fp, "%s\n", schema) < 0) {
|
||||
sprintf(reason, "Failed to write schemata in control group");
|
||||
fclose(fp);
|
||||
fd = open(controlgroup, O_WRONLY);
|
||||
if (fd < 0) {
|
||||
snprintf(reason, sizeof(reason),
|
||||
"open() failed : %s", strerror(errno));
|
||||
ret = -1;
|
||||
|
||||
goto out;
|
||||
goto err_schema_not_empty;
|
||||
}
|
||||
fclose(fp);
|
||||
if (write(fd, schema, schema_len) < 0) {
|
||||
snprintf(reason, sizeof(reason),
|
||||
"write() failed : %s", strerror(errno));
|
||||
close(fd);
|
||||
ret = -1;
|
||||
|
||||
goto err_schema_not_empty;
|
||||
}
|
||||
close(fd);
|
||||
|
||||
err_schema_not_empty:
|
||||
schema[schema_len - 1] = 0;
|
||||
out:
|
||||
ksft_print_msg("Write schema \"%s\" to resctrl FS%s%s\n",
|
||||
schema, ret ? " # " : "",
|
||||
@ -604,63 +565,46 @@ char *fgrep(FILE *inf, const char *str)
|
||||
|
||||
/*
|
||||
* validate_resctrl_feature_request - Check if requested feature is valid.
|
||||
* @resctrl_val: Requested feature
|
||||
* @resource: Required resource (e.g., MB, L3, L2, L3_MON, etc.)
|
||||
* @feature: Required monitor feature (in mon_features file). Can only be
|
||||
* set for L3_MON. Must be NULL for all other resources.
|
||||
*
|
||||
* Return: True if the feature is supported, else false. False is also
|
||||
* returned if resctrl FS is not mounted.
|
||||
* Return: True if the resource/feature is supported, else false. False is
|
||||
* also returned if resctrl FS is not mounted.
|
||||
*/
|
||||
bool validate_resctrl_feature_request(const char *resctrl_val)
|
||||
bool validate_resctrl_feature_request(const char *resource, const char *feature)
|
||||
{
|
||||
char res_path[PATH_MAX];
|
||||
struct stat statbuf;
|
||||
bool found = false;
|
||||
char *res;
|
||||
FILE *inf;
|
||||
int ret;
|
||||
|
||||
if (!resctrl_val)
|
||||
if (!resource)
|
||||
return false;
|
||||
|
||||
ret = find_resctrl_mount(NULL);
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
|
||||
if (!stat(L3_PATH, &statbuf))
|
||||
return true;
|
||||
} else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
|
||||
if (!stat(MB_PATH, &statbuf))
|
||||
return true;
|
||||
} else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
|
||||
!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
|
||||
if (!stat(L3_MON_PATH, &statbuf)) {
|
||||
inf = fopen(L3_MON_FEATURES_PATH, "r");
|
||||
if (!inf)
|
||||
return false;
|
||||
snprintf(res_path, sizeof(res_path), "%s/%s", INFO_PATH, resource);
|
||||
|
||||
if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
|
||||
res = fgrep(inf, "llc_occupancy");
|
||||
if (res) {
|
||||
found = true;
|
||||
free(res);
|
||||
}
|
||||
}
|
||||
if (stat(res_path, &statbuf))
|
||||
return false;
|
||||
|
||||
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
|
||||
res = fgrep(inf, "mbm_total_bytes");
|
||||
if (res) {
|
||||
free(res);
|
||||
res = fgrep(inf, "mbm_local_bytes");
|
||||
if (res) {
|
||||
found = true;
|
||||
free(res);
|
||||
}
|
||||
}
|
||||
}
|
||||
fclose(inf);
|
||||
}
|
||||
}
|
||||
if (!feature)
|
||||
return true;
|
||||
|
||||
return found;
|
||||
snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
|
||||
inf = fopen(res_path, "r");
|
||||
if (!inf)
|
||||
return false;
|
||||
|
||||
res = fgrep(inf, feature);
|
||||
free(res);
|
||||
fclose(inf);
|
||||
|
||||
return !!res;
|
||||
}
|
||||
|
||||
int filter_dmesg(void)
|
||||
|
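The refactored two-argument check above reduces to: stat() the resource directory under /sys/fs/resctrl/info, and only for monitoring features grep the resource's mon_features file. A standalone approximation of that logic (not the kernel helper itself; it skips the fgrep() wrapper and the explicit mount check):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

#define INFO_PATH "/sys/fs/resctrl/info"

static bool resource_supported(const char *resource, const char *feature)
{
	char path[512], line[256];
	struct stat st;
	bool found = false;
	FILE *fp;

	snprintf(path, sizeof(path), "%s/%s", INFO_PATH, resource);
	if (stat(path, &st))	/* resource dir absent or resctrl not mounted */
		return false;
	if (!feature)		/* only the resource itself was requested */
		return true;

	snprintf(path, sizeof(path), "%s/%s/mon_features", INFO_PATH, resource);
	fp = fopen(path, "r");
	if (!fp)
		return false;
	while (fgets(line, sizeof(line), fp)) {
		if (!strncmp(line, feature, strlen(feature))) {
			found = true;
			break;
		}
	}
	fclose(fp);
	return found;
}

int main(void)
{
	printf("MBA supported: %d\n", resource_supported("MB", NULL));
	printf("MBM total supported: %d\n",
	       resource_supported("L3_MON", "mbm_total_bytes"));
	return 0;
}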
@ -1231,7 +1231,7 @@ void *test_membarrier_worker_thread(void *arg)
|
||||
}
|
||||
|
||||
/* Wait for initialization. */
|
||||
while (!atomic_load(&args->percpu_list_ptr)) {}
|
||||
while (!__atomic_load_n(&args->percpu_list_ptr, __ATOMIC_ACQUIRE)) {}
|
||||
|
||||
for (i = 0; i < iters; ++i) {
|
||||
int ret;
|
||||
@ -1299,22 +1299,22 @@ void *test_membarrier_manager_thread(void *arg)
|
||||
test_membarrier_init_percpu_list(&list_a);
|
||||
test_membarrier_init_percpu_list(&list_b);
|
||||
|
||||
atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a);
|
||||
__atomic_store_n(&args->percpu_list_ptr, (intptr_t)&list_a, __ATOMIC_RELEASE);
|
||||
|
||||
while (!atomic_load(&args->stop)) {
|
||||
while (!__atomic_load_n(&args->stop, __ATOMIC_ACQUIRE)) {
|
||||
/* list_a is "active". */
|
||||
cpu_a = rand() % CPU_SETSIZE;
|
||||
/*
|
||||
* As list_b is "inactive", we should never see changes
|
||||
* to list_b.
|
||||
*/
|
||||
if (expect_b != atomic_load(&list_b.c[cpu_b].head->data)) {
|
||||
if (expect_b != __atomic_load_n(&list_b.c[cpu_b].head->data, __ATOMIC_ACQUIRE)) {
|
||||
fprintf(stderr, "Membarrier test failed\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
/* Make list_b "active". */
|
||||
atomic_store(&args->percpu_list_ptr, (intptr_t)&list_b);
|
||||
__atomic_store_n(&args->percpu_list_ptr, (intptr_t)&list_b, __ATOMIC_RELEASE);
|
||||
if (rseq_membarrier_expedited(cpu_a) &&
|
||||
errno != ENXIO /* missing CPU */) {
|
||||
perror("sys_membarrier");
|
||||
@ -1324,27 +1324,27 @@ void *test_membarrier_manager_thread(void *arg)
|
||||
* Cpu A should now only modify list_b, so the values
|
||||
* in list_a should be stable.
|
||||
*/
|
||||
expect_a = atomic_load(&list_a.c[cpu_a].head->data);
|
||||
expect_a = __atomic_load_n(&list_a.c[cpu_a].head->data, __ATOMIC_ACQUIRE);
|
||||
|
||||
cpu_b = rand() % CPU_SETSIZE;
|
||||
/*
|
||||
* As list_a is "inactive", we should never see changes
|
||||
* to list_a.
|
||||
*/
|
||||
if (expect_a != atomic_load(&list_a.c[cpu_a].head->data)) {
|
||||
if (expect_a != __atomic_load_n(&list_a.c[cpu_a].head->data, __ATOMIC_ACQUIRE)) {
|
||||
fprintf(stderr, "Membarrier test failed\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
/* Make list_a "active". */
|
||||
atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a);
|
||||
__atomic_store_n(&args->percpu_list_ptr, (intptr_t)&list_a, __ATOMIC_RELEASE);
|
||||
if (rseq_membarrier_expedited(cpu_b) &&
|
||||
errno != ENXIO /* missing CPU*/) {
|
||||
perror("sys_membarrier");
|
||||
abort();
|
||||
}
|
||||
/* Remember a value from list_b. */
|
||||
expect_b = atomic_load(&list_b.c[cpu_b].head->data);
|
||||
expect_b = __atomic_load_n(&list_b.c[cpu_b].head->data, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
test_membarrier_free_percpu_list(&list_a);
|
||||
@ -1401,7 +1401,7 @@ void test_membarrier(void)
|
||||
}
|
||||
}
|
||||
|
||||
atomic_store(&thread_args.stop, 1);
|
||||
__atomic_store_n(&thread_args.stop, 1, __ATOMIC_RELEASE);
|
||||
ret = pthread_join(manager_thread, NULL);
|
||||
if (ret) {
|
||||
errno = ret;
|
||||
|
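The rseq membarrier changes above replace the relaxed atomic_load()/atomic_store() wrappers with explicit __atomic_load_n()/__atomic_store_n() acquire/release pairs, so initialization of the published list is ordered before the consumer's first use of the pointer. A minimal standalone sketch of that publication pattern (the data and thread bodies are illustrative):

#include <pthread.h>
#include <stdio.h>

struct config {
	int a, b;
};

static struct config cfg;
static struct config *published;	/* accessed only via __atomic builtins */

static void *consumer(void *arg)
{
	struct config *c;

	(void)arg;
	/* Acquire-load: once non-NULL, the producer's writes to *c are visible. */
	while (!(c = __atomic_load_n(&published, __ATOMIC_ACQUIRE)))
		;
	printf("consumer sees a=%d b=%d\n", c->a, c->b);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, consumer, NULL);

	cfg.a = 1;
	cfg.b = 2;
	/* Release-store publishes the fully initialised object. */
	__atomic_store_n(&published, &cfg, __ATOMIC_RELEASE);

	pthread_join(tid, NULL);
	return 0;
}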
@ -111,7 +111,7 @@ int main(void)
|
||||
|
||||
/* Make sure more than the required minimum. */
|
||||
stack_size = getauxval(AT_MINSIGSTKSZ) + SIGSTKSZ;
|
||||
ksft_print_msg("[NOTE]\tthe stack size is %lu\n", stack_size);
|
||||
ksft_print_msg("[NOTE]\tthe stack size is %u\n", stack_size);
|
||||
|
||||
ksft_print_header();
|
||||
ksft_set_plan(3);
|
||||
|
@ -6,18 +6,18 @@
|
||||
ksft_skip=4
|
||||
|
||||
if ! /sbin/modprobe -q -n test_static_key_base; then
|
||||
echo "static_key: module test_static_key_base is not found [SKIP]"
|
||||
echo "static_keys: module test_static_key_base is not found [SKIP]"
|
||||
exit $ksft_skip
|
||||
fi
|
||||
|
||||
if ! /sbin/modprobe -q -n test_static_keys; then
|
||||
echo "static_key: module test_static_keys is not found [SKIP]"
|
||||
echo "static_keys: module test_static_keys is not found [SKIP]"
|
||||
exit $ksft_skip
|
||||
fi
|
||||
|
||||
if /sbin/modprobe -q test_static_key_base; then
|
||||
if /sbin/modprobe -q test_static_keys; then
|
||||
echo "static_key: ok"
|
||||
echo "static_keys: ok"
|
||||
/sbin/modprobe -q -r test_static_keys
|
||||
/sbin/modprobe -q -r test_static_key_base
|
||||
else
|
||||
@ -25,6 +25,6 @@ if /sbin/modprobe -q test_static_key_base; then
|
||||
/sbin/modprobe -q -r test_static_key_base
|
||||
fi
|
||||
else
|
||||
echo "static_key: [FAIL]"
|
||||
echo "static_keys: [FAIL]"
|
||||
exit 1
|
||||
fi
|
||||
|
tools/testing/selftests/tdx/.gitignore (new file)
@@ -0,0 +1 @@
|
||||
tdx_guest_test
|
@ -118,7 +118,7 @@ int nanosleep_lat_test(int clockid, long long ns)
|
||||
clock_gettime(clockid, &end);
|
||||
|
||||
if (((timespec_sub(start, end)/count)-ns) > UNRESONABLE_LATENCY) {
|
||||
printf("Large rel latency: %lld ns :", (timespec_sub(start, end)/count)-ns);
|
||||
ksft_print_msg("Large rel latency: %lld ns :", (timespec_sub(start, end)/count)-ns);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -132,20 +132,23 @@ int nanosleep_lat_test(int clockid, long long ns)
|
||||
}
|
||||
|
||||
if (latency/count > UNRESONABLE_LATENCY) {
|
||||
printf("Large abs latency: %lld ns :", latency/count);
|
||||
ksft_print_msg("Large abs latency: %lld ns :", latency/count);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#define SKIPPED_CLOCK_COUNT 3
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
long long length;
|
||||
int clockid, ret;
|
||||
|
||||
ksft_print_header();
|
||||
ksft_set_plan(NR_CLOCKIDS - CLOCK_REALTIME - SKIPPED_CLOCK_COUNT);
|
||||
|
||||
for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) {
|
||||
|
||||
/* Skip cputime clockids since nanosleep won't increment cputime */
|
||||
@ -154,9 +157,6 @@ int main(int argc, char **argv)
|
||||
clockid == CLOCK_HWSPECIFIC)
|
||||
continue;
|
||||
|
||||
printf("nsleep latency %-26s ", clockstring(clockid));
|
||||
fflush(stdout);
|
||||
|
||||
length = 10;
|
||||
while (length <= (NSEC_PER_SEC * 10)) {
|
||||
ret = nanosleep_lat_test(clockid, length);
|
||||
@ -167,14 +167,12 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
if (ret == UNSUPPORTED) {
|
||||
printf("[UNSUPPORTED]\n");
|
||||
continue;
|
||||
ksft_test_result_skip("%s\n", clockstring(clockid));
|
||||
} else {
|
||||
ksft_test_result(ret >= 0, "%s\n",
|
||||
clockstring(clockid));
|
||||
}
|
||||
if (ret < 0) {
|
||||
printf("[FAILED]\n");
|
||||
return ksft_exit_fail();
|
||||
}
|
||||
printf("[OK]\n");
|
||||
}
|
||||
return ksft_exit_pass();
|
||||
|
||||
ksft_finished();
|
||||
}
|
||||
|
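The converted test above reports per-clock results through ksft_test_result*() instead of raw printf, but the underlying measurement is unchanged: sleep for a requested interval and compare the elapsed wall-clock time against the request. A standalone sketch of that measurement (the interval and pass criterion are illustrative):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static long long ts_sub(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * NSEC_PER_SEC + (b.tv_nsec - a.tv_nsec);
}

int main(void)
{
	struct timespec start, end, req = { 0, 10 * 1000 * 1000 };	/* 10 ms */
	long long requested, slept, latency;

	requested = req.tv_sec * NSEC_PER_SEC + req.tv_nsec;

	clock_gettime(CLOCK_MONOTONIC, &start);
	nanosleep(&req, NULL);
	clock_gettime(CLOCK_MONOTONIC, &end);

	slept = ts_sub(start, end);
	latency = slept - requested;
	printf("requested %lld ns, slept %lld ns, latency %lld ns\n",
	       requested, slept, latency);
	return latency < 0;	/* sleeping less than requested would be a bug */
}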
@ -76,22 +76,21 @@ static int check_diff(struct timeval start, struct timeval end)
|
||||
|
||||
static int check_itimer(int which)
|
||||
{
|
||||
const char *name;
|
||||
int err;
|
||||
struct timeval start, end;
|
||||
struct itimerval val = {
|
||||
.it_value.tv_sec = DELAY,
|
||||
};
|
||||
|
||||
printf("Check itimer ");
|
||||
|
||||
if (which == ITIMER_VIRTUAL)
|
||||
printf("virtual... ");
|
||||
name = "ITIMER_VIRTUAL";
|
||||
else if (which == ITIMER_PROF)
|
||||
printf("prof... ");
|
||||
name = "ITIMER_PROF";
|
||||
else if (which == ITIMER_REAL)
|
||||
printf("real... ");
|
||||
|
||||
fflush(stdout);
|
||||
name = "ITIMER_REAL";
|
||||
else
|
||||
return -1;
|
||||
|
||||
done = 0;
|
||||
|
||||
@ -104,13 +103,13 @@ static int check_itimer(int which)
|
||||
|
||||
err = gettimeofday(&start, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't call gettimeofday()\n");
|
||||
ksft_perror("Can't call gettimeofday()");
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = setitimer(which, &val, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't set timer\n");
|
||||
ksft_perror("Can't set timer");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -123,20 +122,18 @@ static int check_itimer(int which)
|
||||
|
||||
err = gettimeofday(&end, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't call gettimeofday()\n");
|
||||
ksft_perror("Can't call gettimeofday()");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!check_diff(start, end))
|
||||
printf("[OK]\n");
|
||||
else
|
||||
printf("[FAIL]\n");
|
||||
ksft_test_result(check_diff(start, end) == 0, "%s\n", name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_timer_create(int which)
|
||||
{
|
||||
const char *type;
|
||||
int err;
|
||||
timer_t id;
|
||||
struct timeval start, end;
|
||||
@ -144,31 +141,32 @@ static int check_timer_create(int which)
|
||||
.it_value.tv_sec = DELAY,
|
||||
};
|
||||
|
||||
printf("Check timer_create() ");
|
||||
if (which == CLOCK_THREAD_CPUTIME_ID) {
|
||||
printf("per thread... ");
|
||||
type = "thread";
|
||||
} else if (which == CLOCK_PROCESS_CPUTIME_ID) {
|
||||
printf("per process... ");
|
||||
type = "process";
|
||||
} else {
|
||||
ksft_print_msg("Unknown timer_create() type %d\n", which);
|
||||
return -1;
|
||||
}
|
||||
fflush(stdout);
|
||||
|
||||
done = 0;
|
||||
err = timer_create(which, NULL, &id);
|
||||
if (err < 0) {
|
||||
perror("Can't create timer\n");
|
||||
ksft_perror("Can't create timer");
|
||||
return -1;
|
||||
}
|
||||
signal(SIGALRM, sig_handler);
|
||||
|
||||
err = gettimeofday(&start, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't call gettimeofday()\n");
|
||||
ksft_perror("Can't call gettimeofday()");
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = timer_settime(id, 0, &val, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't set timer\n");
|
||||
ksft_perror("Can't set timer");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -176,14 +174,12 @@ static int check_timer_create(int which)
|
||||
|
||||
err = gettimeofday(&end, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't call gettimeofday()\n");
|
||||
ksft_perror("Can't call gettimeofday()");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!check_diff(start, end))
|
||||
printf("[OK]\n");
|
||||
else
|
||||
printf("[FAIL]\n");
|
||||
ksft_test_result(check_diff(start, end) == 0,
|
||||
"timer_create() per %s\n", type);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -220,25 +216,25 @@ static int check_timer_distribution(void)
|
||||
.it_interval.tv_nsec = 1000 * 1000,
|
||||
};
|
||||
|
||||
printf("Check timer_create() per process signal distribution... ");
|
||||
fflush(stdout);
|
||||
|
||||
remain = nthreads + 1; /* worker threads + this thread */
|
||||
signal(SIGALRM, distribution_handler);
|
||||
err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
|
||||
if (err < 0) {
|
||||
perror("Can't create timer\n");
|
||||
ksft_perror("Can't create timer");
|
||||
return -1;
|
||||
}
|
||||
err = timer_settime(id, 0, &val, NULL);
|
||||
if (err < 0) {
|
||||
perror("Can't set timer\n");
|
||||
ksft_perror("Can't set timer");
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (i = 0; i < nthreads; i++) {
|
||||
if (pthread_create(&threads[i], NULL, distribution_thread, NULL)) {
|
||||
perror("Can't create thread\n");
|
||||
err = pthread_create(&threads[i], NULL, distribution_thread,
|
||||
NULL);
|
||||
if (err) {
|
||||
ksft_print_msg("Can't create thread: %s (%d)\n",
|
||||
strerror(errno), errno);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -247,25 +243,30 @@ static int check_timer_distribution(void)
|
||||
while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
|
||||
|
||||
for (i = 0; i < nthreads; i++) {
|
||||
if (pthread_join(threads[i], NULL)) {
|
||||
perror("Can't join thread\n");
|
||||
err = pthread_join(threads[i], NULL);
|
||||
if (err) {
|
||||
ksft_print_msg("Can't join thread: %s (%d)\n",
|
||||
strerror(errno), errno);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (timer_delete(id)) {
|
||||
perror("Can't delete timer\n");
|
||||
ksft_perror("Can't delete timer");
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("[OK]\n");
|
||||
ksft_test_result_pass("check_timer_distribution\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
printf("Testing posix timers. False negative may happen on CPU execution \n");
|
||||
printf("based timers if other threads run on the CPU...\n");
|
||||
ksft_print_header();
|
||||
ksft_set_plan(6);
|
||||
|
||||
ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
|
||||
ksft_print_msg("based timers if other threads run on the CPU...\n");
|
||||
|
||||
if (check_itimer(ITIMER_VIRTUAL) < 0)
|
||||
return ksft_exit_fail();
|
||||
@ -294,5 +295,5 @@ int main(int argc, char **argv)
|
||||
if (check_timer_distribution() < 0)
|
||||
return ksft_exit_fail();
|
||||
|
||||
return ksft_exit_pass();
|
||||
ksft_finished();
|
||||
}
|
||||
|
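check_itimer() above now reports through ksft_test_result() with a clock name instead of printing OK/FAIL inline. Its core is: arm an interval timer, spin until the SIGALRM handler fires, and verify that at least DELAY seconds of the relevant time base elapsed. A standalone sketch for ITIMER_REAL (DELAY and the busy loop are illustrative):

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

#define DELAY 2

static volatile sig_atomic_t done;

static void sig_handler(int signum)
{
	(void)signum;
	done = 1;
}

int main(void)
{
	struct itimerval val = { .it_value.tv_sec = DELAY };
	struct timeval start, end;
	long long elapsed_us;

	signal(SIGALRM, sig_handler);
	gettimeofday(&start, NULL);
	if (setitimer(ITIMER_REAL, &val, NULL) < 0) {
		perror("setitimer");
		return 1;
	}

	while (!done)
		;	/* for ITIMER_VIRTUAL/PROF this loop also burns CPU time */

	gettimeofday(&end, NULL);
	elapsed_us = (end.tv_sec - start.tv_sec) * 1000000LL +
		     (end.tv_usec - start.tv_usec);
	printf("timer fired after %lld us (expected >= %d s)\n", elapsed_us, DELAY);
	return elapsed_us < DELAY * 1000000LL;
}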
@ -78,7 +78,7 @@ static int uevent_listener(unsigned long post_flags, bool expect_uevent,
|
||||
{
|
||||
int sk_fd, ret;
|
||||
socklen_t sk_addr_len;
|
||||
int fret = -1, rcv_buf_sz = __UEVENT_BUFFER_SIZE;
|
||||
int rcv_buf_sz = __UEVENT_BUFFER_SIZE;
|
||||
uint64_t sync_add = 1;
|
||||
struct sockaddr_nl sk_addr = { 0 }, rcv_addr = { 0 };
|
||||
char buf[__UEVENT_BUFFER_SIZE] = { 0 };
|
||||
@ -121,6 +121,7 @@ static int uevent_listener(unsigned long post_flags, bool expect_uevent,
|
||||
|
||||
if ((size_t)sk_addr_len != sizeof(sk_addr)) {
|
||||
fprintf(stderr, "Invalid socket address size\n");
|
||||
ret = -1;
|
||||
goto on_error;
|
||||
}
|
||||
|
||||
@ -147,11 +148,12 @@ static int uevent_listener(unsigned long post_flags, bool expect_uevent,
|
||||
ret = write_nointr(sync_fd, &sync_add, sizeof(sync_add));
|
||||
close(sync_fd);
|
||||
if (ret != sizeof(sync_add)) {
|
||||
ret = -1;
|
||||
fprintf(stderr, "Failed to synchronize with parent process\n");
|
||||
goto on_error;
|
||||
}
|
||||
|
||||
fret = 0;
|
||||
ret = 0;
|
||||
for (;;) {
|
||||
ssize_t r;
|
||||
|
||||
@ -187,7 +189,7 @@ static int uevent_listener(unsigned long post_flags, bool expect_uevent,
|
||||
on_error:
|
||||
close(sk_fd);
|
||||
|
||||
return fret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int trigger_uevent(unsigned int times)
|
||||
|
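The listener above binds a NETLINK_KOBJECT_UEVENT socket and reads events in a loop; the fix makes it propagate a single ret value instead of the stale fret. A cut-down standalone sketch of such a listener (buffer size, group mask and event count are illustrative; binding the socket typically needs root):

#include <linux/netlink.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl addr = { 0 };
	char buf[2048];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_KOBJECT_UEVENT);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1;	/* kernel uevent multicast group */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}

	for (int i = 0; i < 5; i++) {	/* read a few events and exit */
		ssize_t r = recv(fd, buf, sizeof(buf) - 1, 0);

		if (r <= 0)
			break;
		buf[r] = '\0';
		printf("uevent: %s\n", buf);	/* first NUL-separated field only */
	}

	close(fd);
	return 0;
}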
tools/testing/selftests/user_events/.gitignore (new file)
@@ -0,0 +1,4 @@
|
||||
abi_test
|
||||
dyn_test
|
||||
ftrace_test
|
||||
perf_test
|