kmod: use system_unbound_wq instead of khelper

We need to launch the usermodehelper kernel threads with the widest
affinity and this is partly why we use khelper.  This workqueue has
unbound properties and thus a wide affinity inherited by all its children.

Now khelper also has special properties that we aren't much interested in:
it is ordered and single-threaded.  There is really no need for ordering,
since all we do is create kernel threads, and that can be done concurrently.
The single-thread restriction is an unnecessary limitation as well.

The workqueue engine already provides generic unbound workqueues that
don't carry these unneeded properties and that handle parallel jobs well.
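
For illustration only (the work item and function names below are made up and
not part of this patch), here is a minimal sketch contrasting the khelper-style
private ordered workqueue with simply queueing onto the generic
system_unbound_wq that the workqueue core already provides:

#include <linux/init.h>
#include <linux/workqueue.h>

/* Hypothetical work item, only to illustrate the two queueing patterns. */
static void umh_example_fn(struct work_struct *work)
{
	/*
	 * Runs in a kworker; on system_unbound_wq that worker has a wide
	 * (per-node) affinity instead of being pinned to one CPU.
	 */
}
static DECLARE_WORK(umh_example_work, umh_example_fn);

static int __init umh_example_init(void)
{
	/*
	 * khelper-style pattern: a private ordered, single-threaded
	 * workqueue that the caller must create, check and destroy.
	 */
	struct workqueue_struct *wq = create_singlethread_workqueue("khelper-like");

	if (!wq)
		return -ENOMEM;
	queue_work(wq, &umh_example_work);
	flush_workqueue(wq);
	destroy_workqueue(wq);

	/*
	 * Pattern used by this patch: reuse the generic unbound workqueue,
	 * which needs no setup and runs independent items concurrently.
	 */
	queue_work(system_unbound_wq, &umh_example_work);
	return 0;
}

The second form needs no init/teardown and imposes no ordering, which is all
the usermodehelper code needs.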

The only worrisome characteristic is their affinity to the node of the
current CPU.  That is fine for creating the usermodehelper kernel threads,
but those threads inherit this affinity for longer jobs such as requesting
modules.

This patch proposes to use these node-affine unbound workqueues, assuming
that a single node is sufficient to handle several parallel usermodehelper
requests.
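
As a hedged usage sketch (the helper path and arguments below are
placeholders, not taken from this patch), this is what a caller-side
usermodehelper request looks like; after this patch the work item behind it
is queued on system_unbound_wq, so the helper thread starts with the
node-wide affinity discussed above rather than khelper's:

#include <linux/kmod.h>

/* Illustrative only: the helper path and arguments are placeholders. */
static int run_example_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "event", NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 NULL };

	/*
	 * call_usermodehelper() packages this into a work item that, after
	 * this patch, is queued on system_unbound_wq instead of khelper_wq.
	 */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}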

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 90f023030e (parent b639e86bae), committed 2015-09-09 by Linus Torvalds
3 changed files with 17 additions and 26 deletions

include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
 	UMH_DISABLED,
 };
 
-extern void usermodehelper_init(void);
-
 extern int __usermodehelper_disable(enum umh_disable_depth depth);
 extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
 

init/main.c
@@ -877,7 +877,6 @@ static void __init do_initcalls(void)
 static void __init do_basic_setup(void)
 {
 	cpuset_init_smp();
-	usermodehelper_init();
 	shmem_init();
 	driver_init();
 	init_irq_proc();

kernel/kmod.c
@@ -45,8 +45,6 @@
 
 extern int max_threads;
 
-static struct workqueue_struct *khelper_wq;
-
 #define CAP_BSET	(void *)1
 #define CAP_PI		(void *)2
@@ -225,7 +223,7 @@ static int call_usermodehelper_exec_async(void *data)
 	spin_unlock_irq(&current->sighand->siglock);
 
 	/*
-	 * Our parent is khelper which runs with elevated scheduling
+	 * Our parent (unbound workqueue) runs with elevated scheduling
 	 * priority. Avoid propagating that into the userspace child.
 	 */
 	set_user_nice(current, 0);
@@ -268,9 +266,10 @@ out:
 }
 
 /*
- * Handles UMH_WAIT_PROC. Our parent khelper can't wait for usermodehelper
- * completion without blocking every other pending requests. That's why
- * we use a kernel thread dedicated for that purpose.
+ * Handles UMH_WAIT_PROC. Our parent (unbound workqueue) might not be able to
+ * run enough instances to handle usermodehelper completions without blocking
+ * some other pending requests. That's why we use a kernel thread dedicated for
+ * that purpose.
  */
 static int call_usermodehelper_exec_sync(void *data)
 {
@@ -312,14 +311,15 @@ static int call_usermodehelper_exec_sync(void *data)
 /*
  * This function doesn't strictly needs to be called asynchronously. But we
  * need to create the usermodehelper kernel threads from a task that is affine
- * to all CPUs (or nohz housekeeping ones) such that they inherit a widest
- * affinity irrespective of call_usermodehelper() callers with possibly reduced
- * affinity (eg: per-cpu workqueues). We don't want usermodehelper targets to
- * contend any busy CPU.
- * Khelper provides such wide affinity.
+ * to an optimized set of CPUs (or nohz housekeeping ones) such that they
+ * inherit a widest affinity irrespective of call_usermodehelper() callers with
+ * possibly reduced affinity (eg: per-cpu workqueues). We don't want
+ * usermodehelper targets to contend a busy CPU.
  *
- * Besides, khelper provides the privilege level that caller might not have to
- * perform the usermodehelper request.
+ * Unbound workqueues provide such wide affinity.
+ *
+ * Besides, workqueues provide the privilege level that caller might not have
+ * to perform the usermodehelper request.
  *
  */
 static void call_usermodehelper_exec_work(struct work_struct *work)
@@ -549,8 +549,8 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * from interrupt context.
  *
  * Runs a user-space application. The application is started
- * asynchronously if wait is not set, and runs as a child of khelper.
- * (ie. it runs with full root capabilities and wide affinity).
+ * asynchronously if wait is not set, and runs as a child of system workqueues.
+ * (ie. it runs with full root capabilities and optimized affinity).
  */
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
@@ -562,7 +562,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 		return -EINVAL;
 	}
 	helper_lock();
-	if (!khelper_wq || usermodehelper_disabled) {
+	if (usermodehelper_disabled) {
 		retval = -EBUSY;
 		goto out;
 	}
@@ -574,7 +574,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
 	sub_info->wait = wait;
 
-	queue_work(khelper_wq, &sub_info->work);
+	queue_work(system_unbound_wq, &sub_info->work);
 	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
 		goto unlock;
@@ -704,9 +704,3 @@ struct ctl_table usermodehelper_table[] = {
 	},
 	{ }
 };
-
-void __init usermodehelper_init(void)
-{
-	khelper_wq = create_singlethread_workqueue("khelper");
-	BUG_ON(!khelper_wq);
-}