kmod: throttle kmod thread limit
If we reach the limit of modprobe_limit threads running, the next request_module() call will fail. The original reason for adding a hard limit was to do away with possible issues in old circumstances that could create a recursive series of request_module() calls. Rather than being so aggressive and rejecting calls once we've reached the limit, we can do better: simply make pending callers wait until the count drops below the threshold, and then throttle them in, one by one.

This throttling enables requests over the kmod concurrent limit to be processed once a pending request completes. Only the first item queued up to wait is woken up. The assumption here is that once a task is woken it has no option but to also kick the wait queue, to check whether there are more pending tasks -- regardless of whether or not it was successful.

By throttling and processing at most the maximum number of concurrent kmod tasks we avoid unexpected request_module() failures, and we keep memory consumption during module loading to a minimum.

On an x86_64 qemu guest with 4 cores and 4 GiB of RAM, the two new tests take the following run time:

time ./kmod.sh -t 0008
real    0m16.366s
user    0m0.883s
sys     0m8.916s

time ./kmod.sh -t 0009
real    0m50.803s
user    0m0.791s
sys     0m9.852s

Link: http://lkml.kernel.org/r/20170628223155.26472-4-mcgrof@kernel.org
Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Jessica Yu <jeyu@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Michal Marek <mmarek@suse.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6d7964a722
parent d9c6a72d6f
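The throttling described in the commit message boils down to a bounded counter paired with a wait queue: take a slot if one is free, otherwise sleep until a finishing request releases its slot and wakes one waiter. Below is a minimal userspace C sketch of that pattern, for illustration only; a pthread mutex and condition variable stand in for the kernel's atomic counter and wait queue, and every name in it (throttle_acquire, fake_request_module, ...) is made up for this sketch rather than taken from the kernel.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_CONCURRENT 50            /* stands in for MAX_KMOD_CONCURRENT */

static int slots = MAX_CONCURRENT;   /* stands in for kmod_concurrent_max */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;  /* stands in for kmod_wq */

static void throttle_acquire(void)
{
	pthread_mutex_lock(&lock);
	while (slots <= 0)                       /* like atomic_dec_if_positive() < 0 */
		pthread_cond_wait(&waitq, &lock);    /* like wait_event_interruptible() */
	slots--;
	pthread_mutex_unlock(&lock);
}

static void throttle_release(void)
{
	pthread_mutex_lock(&lock);
	slots++;                                 /* like atomic_inc(&kmod_concurrent_max) */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&waitq);             /* like wake_up(): wake one waiter */
}

static void *fake_request_module(void *arg)
{
	(void)arg;
	throttle_acquire();
	usleep(10000);                           /* stand-in for call_modprobe() */
	throttle_release();
	return NULL;
}

int main(void)
{
	pthread_t threads[150];                  /* 150 callers against a limit of 50 */

	for (int i = 0; i < 150; i++)
		pthread_create(&threads[i], NULL, fake_request_module, NULL);
	for (int i = 0; i < 150; i++)
		pthread_join(threads[i], NULL);
	puts("all requests processed without exceeding the limit");
	return 0;
}

Built with something like "cc -pthread throttle.c", all 150 callers complete, with at most 50 in flight at once; pthread_cond_signal() wakes a single waiter, mirroring the "only the first item queued up to wait is woken up" behavior described above.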
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -68,6 +68,7 @@ static DECLARE_RWSEM(umhelper_sem);
  */
 #define MAX_KMOD_CONCURRENT 50
 static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
+static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
 
 /*
 	modprobe_path is set via /proc/sys.
@@ -140,7 +141,6 @@ int __request_module(bool wait, const char *fmt, ...)
 	va_list args;
 	char module_name[MODULE_NAME_LEN];
 	int ret;
-	static int kmod_loop_msg;
 
 	/*
 	 * We don't allow synchronous module loading from async. Module
@@ -164,14 +164,11 @@ int __request_module(bool wait, const char *fmt, ...)
 		return ret;
 
 	if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
-		/* We may be blaming an innocent here, but unlikely */
-		if (kmod_loop_msg < 5) {
-			printk(KERN_ERR
-			       "request_module: runaway loop modprobe %s\n",
-			       module_name);
-			kmod_loop_msg++;
-		}
-		return -ENOMEM;
+		pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
+				    atomic_read(&kmod_concurrent_max),
+				    MAX_KMOD_CONCURRENT, module_name);
+		wait_event_interruptible(kmod_wq,
+					 atomic_dec_if_positive(&kmod_concurrent_max) >= 0);
 	}
 
 	trace_module_request(module_name, wait, _RET_IP_);
@@ -179,6 +176,7 @@ int __request_module(bool wait, const char *fmt, ...)
 	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
 
 	atomic_inc(&kmod_concurrent_max);
+	wake_up(&kmod_wq);
 
 	return ret;
 }

--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -59,28 +59,8 @@ ALL_TESTS="$ALL_TESTS 0004:1:1"
 ALL_TESTS="$ALL_TESTS 0005:10:1"
 ALL_TESTS="$ALL_TESTS 0006:10:1"
 ALL_TESTS="$ALL_TESTS 0007:5:1"
-
-# Disabled tests:
-#
-# 0008 x 150 - multithreaded - push kmod_concurrent over max_modprobes for request_module()"
-# Current best-effort failure interpretation:
-# Enough module requests get loaded in place fast enough to reach over the
-# max_modprobes limit and trigger a failure -- before we're even able to
-# start processing pending requests.
-ALL_TESTS="$ALL_TESTS 0008:150:0"
-
-# 0009 x 150 - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
-# Current best-effort failure interpretation:
-#
-# get_fs_type() requests modules using aliases as such the optimization in
-# place today to look for already loaded modules will not take effect and
-# we end up requesting a new module to load, this bumps the kmod_concurrent,
-# and in certain circumstances can lead to pushing the kmod_concurrent over
-# the max_modprobe limit.
-#
-# This test fails much easier than test 0008 since the alias optimizations
-# are not in place.
-ALL_TESTS="$ALL_TESTS 0009:150:0"
+ALL_TESTS="$ALL_TESTS 0008:150:1"
+ALL_TESTS="$ALL_TESTS 0009:150:1"
 
 test_modprobe()
 {
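For context, any in-kernel caller of request_module() now goes through the throttled path in the hunk above; get_fs_type(), which test 0009 exercises, requests filesystem modules by "fs-<name>" alias in the same way. A hypothetical, illustration-only caller (not part of this patch) might look like the following sketch:

#include <linux/module.h>
#include <linux/kmod.h>

static int __init kmod_throttle_demo_init(void)
{
	/*
	 * With this patch the call below may sleep waiting for a free
	 * modprobe slot instead of failing once the concurrency limit
	 * of 50 in-flight requests has been reached.
	 */
	int ret = request_module("fs-%s", "xfs");

	pr_info("request_module(fs-xfs) returned %d\n", ret);
	return 0;
}

static void __exit kmod_throttle_demo_exit(void)
{
}

module_init(kmod_throttle_demo_init);
module_exit(kmod_throttle_demo_exit);
MODULE_LICENSE("GPL");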