/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#endif /* #ifdef CONFIG_RCU_BOOST */
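
/*
 * Editorial note: DEFINE_PER_CPU(type, name) above creates one instance
 * of "name" for each possible CPU, so every CPU's RCU kthread gets its
 * own task pointer, status word, loop counter, and work flag, accessed
 * with this_cpu_read()/this_cpu_write() and friends.
 */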

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
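
/*
 * Editorial note, drawn from the change that made rcu_nocb_poll an
 * early_param() rather than a module_param(): it is enabled via the
 * bare "rcu_nocb_poll" kernel boot parameter, with no "rcutree."
 * prefix.  An illustrative command line (assuming a
 * CONFIG_RCU_NOCB_CPU=y build) might be:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * which offloads callbacks from CPUs 1-7 and makes the offload
 * kthreads poll instead of waiting to be awakened.
 */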

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU debugfs-based tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
		pr_info("\tRCU torture testing starts during boot.\n");
	if (IS_ENABLED(CONFIG_RCU_CPU_STALL_INFO))
		pr_info("\tAdditional per-CPU info printed with stalls.\n");
	if (NUM_RCU_LVL_4 != 0)
		pr_info("\tFour-level hierarchy is enabled.\n");
	if (CONFIG_RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			CONFIG_RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
	if (IS_ENABLED(CONFIG_RCU_BOOST))
		pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
}
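
/*
 * Editorial note: IS_ENABLED(CONFIG_FOO) expands to a compile-time
 * constant 1 (CONFIG_FOO=y or CONFIG_FOO=m) or 0, so each branch above
 * that tests a disabled option is discarded by the compiler, giving
 * #ifdef-style dead-code elimination while keeping all of the code
 * visible to the compiler.  For example,
 *
 *	if (IS_ENABLED(CONFIG_PROVE_RCU))
 *		pr_info("\tRCU lockdep checking is enabled.\n");
 *
 * compiles either to the bare pr_info() call or to nothing at all.
 */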

#ifdef CONFIG_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state_p = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * As with the other rcu_*_qs() functions, callers to this function
 * must disable preemption.
 */
static void rcu_preempt_qs(void)
{
	if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_preempt_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
		current->rcu_read_unlock_special.b.need_qs = false;
	}
}
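
/*
 * Editorial sketch, not in the original: the barrier() in
 * rcu_preempt_qs() orders the store to ->passed_quiesce before the
 * clearing of the current task's ->rcu_read_unlock_special.b.need_qs.
 * A scheduling-clock interrupt arriving after the barrier therefore
 * sees ->passed_quiesce already set and refrains from re-setting
 * need_qs from rcu_preempt_check_callbacks(), so the cleared value
 * sticks.
 */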

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = this_cpu_ptr(rcu_preempt_state.rda);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
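		/*
		 * Editorial sketch of the resulting ->blkd_tasks layout,
		 * not in the original: tasks blocking only the next
		 * grace period are added at the head, while tasks
		 * blocking the current grace period are inserted just
		 * before *rnp->gp_tasks, so everything from
		 * *rnp->gp_tasks to the tail blocks the current grace
		 * period:
		 *
		 *	head -> [next-GP blockers] -> gp_tasks ... tail
		 */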
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
rcu: protect __rcu_read_unlock() against scheduler-using irq handlers
The addition of RCU read-side critical sections within runqueue and
priority-inheritance lock critical sections introduced some deadlock
cycles, for example, involving interrupts from __rcu_read_unlock()
where the interrupt handlers call wake_up(). This situation can cause
the instance of __rcu_read_unlock() invoked from interrupt to do some
of the processing that would otherwise have been carried out by the
task-level instance of __rcu_read_unlock(). When the interrupt-level
instance of __rcu_read_unlock() is called with a scheduler lock held
from interrupt-entry/exit situations where in_irq() returns false,
deadlock can result.
This commit resolves these deadlocks by using negative values of
the per-task ->rcu_read_lock_nesting counter to indicate that an
instance of __rcu_read_unlock() is in flight, which in turn prevents
instances invoked from interrupt handlers from doing any special processing.
This patch is inspired by Steven Rostedt's earlier patch that similarly
made __rcu_read_unlock() guard against interrupt-mediated recursion
(see https://lkml.org/lkml/2011/7/15/326), but this commit refines
Steven's approach to avoid the need for preemption disabling on the
__rcu_read_unlock() fastpath and to also avoid the need for manipulating
a separate per-CPU variable.
This patch avoids the need for preempt_disable() by instead using negative
values of the per-task ->rcu_read_lock_nesting counter. Note that nested
rcu_read_lock()/rcu_read_unlock() pairs are still permitted, but they will
never see ->rcu_read_lock_nesting go to zero, and will therefore never
invoke rcu_read_unlock_special(), thus preventing them from seeing the
RCU_READ_UNLOCK_BLOCKED bit should it be set in ->rcu_read_unlock_special.
This patch also adds a check for ->rcu_read_lock_nesting being negative
in rcu_check_callbacks(), thus preventing the RCU_READ_UNLOCK_NEED_QS
bit from being set should a scheduling-clock interrupt occur while
__rcu_read_unlock() is exiting from an outermost RCU read-side critical
section.
Of course, __rcu_read_unlock() can be preempted during the time that
->rcu_read_lock_nesting is negative. This could result in the setting
of the RCU_READ_UNLOCK_BLOCKED bit after __rcu_read_unlock() checks it,
and would also result in this task being queued on the corresponding
rcu_node structure's blkd_tasks list. Therefore, some later RCU read-side
critical section would enter rcu_read_unlock_special() to clean up --
which could result in deadlock if that critical section happened to be in
the scheduler where the runqueue or priority-inheritance locks were held.
This situation is dealt with by making rcu_preempt_note_context_switch()
check for negative ->rcu_read_lock_nesting, thus refraining from
queuing the task (and from setting RCU_READ_UNLOCK_BLOCKED) if we are
already exiting from the outermost RCU read-side critical section (in
other words, we really are no longer actually in that RCU read-side
critical section). In addition, rcu_preempt_note_context_switch()
invokes rcu_read_unlock_special() to carry out the cleanup in this case,
which clears out the ->rcu_read_unlock_special bits and dequeues the task
(if necessary), in turn avoiding needless delay of the current RCU grace
period and needless RCU priority boosting.
It is still illegal to call rcu_read_unlock() while holding a scheduler
lock if the prior RCU read-side critical section has ever had either
preemption or irqs enabled. However, the common use case is legal,
namely where the entire RCU read-side critical section executes with
irqs disabled, for example, when the scheduler lock is held across the
entire lifetime of the RCU read-side critical section.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
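The negative-nesting guard described above fits in a few lines. What follows is a simplified, user-space sketch of the fastpath shape, not the kernel's __rcu_read_unlock() itself: the task structure, flag word, and stub slowpath are stand-ins, and the compiler barriers the real code needs around the counter writes are omitted.

#include <limits.h>
#include <stdio.h>

struct task {
	int rcu_read_lock_nesting;	/* stand-in for the task_struct field */
	int rcu_read_unlock_special;	/* nonzero if special work is pending */
};

/* Stub slowpath: the real rcu_read_unlock_special() dequeues the task
 * and reports quiescent states; here it just clears the flag word. */
static void slowpath_unlock_special(struct task *t)
{
	t->rcu_read_unlock_special = 0;
}

static void sketch_rcu_read_unlock(struct task *t)
{
	if (t->rcu_read_lock_nesting != 1) {
		/* Nested unlock: the count never reaches zero here, so an
		 * interrupt-level unlock sees a positive count and skips
		 * all special processing. */
		--t->rcu_read_lock_nesting;
	} else {
		/* Outermost unlock: advertise "unlock in flight" with a
		 * large negative value before checking for special work,
		 * so interrupt handlers running in this window stand down. */
		t->rcu_read_lock_nesting = INT_MIN;
		if (t->rcu_read_unlock_special)
			slowpath_unlock_special(t);
		t->rcu_read_lock_nesting = 0;
	}
}

int main(void)
{
	struct task t = { .rcu_read_lock_nesting = 1,
			  .rcu_read_unlock_special = 1 };

	sketch_rcu_read_unlock(&t);
	printf("nesting=%d special=%d\n",
	       t.rcu_read_lock_nesting, t.rcu_read_unlock_special);
	return 0;
}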
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special.s) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally. Either way, we can now note a quiescent state
	 * for this CPU. Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs();
}
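The queueing rules at the top of this function determine which grace period a newly blocked task holds up. The small user-space model below (hypothetical list-only "tasks"; the kernel uses struct list_head nodes embedded in task_struct) makes the positioning concrete: everything from ->gp_tasks to the tail blocks the current grace period, while tasks added at the head block only the next one.

#include <stdio.h>

/* User-space re-creation of the kernel's circular doubly-linked list. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Insert @new right after @head, exactly like the kernel's list_add(). */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

int main(void)
{
	struct list_head blkd_tasks;	/* models rnp->blkd_tasks */
	struct list_head *gp_tasks;	/* models rnp->gp_tasks */
	struct list_head a, b, c;	/* models tasks' rcu_node_entry */
	struct list_head *p;

	INIT_LIST_HEAD(&blkd_tasks);

	/* Task A is already queued and blocks the current grace period. */
	list_add(&a, &blkd_tasks);
	gp_tasks = &a;

	/* Task B blocks while its CPU still owes a quiescent state:
	 * insert it just before *gp_tasks and move gp_tasks to it, so
	 * B also blocks the current grace period. */
	list_add(&b, gp_tasks->prev);
	gp_tasks = &b;

	/* Task C blocks after its CPU already passed through a quiescent
	 * state: add it at the head, ahead of gp_tasks, so it blocks
	 * only the next grace period. */
	list_add(&c, &blkd_tasks);

	/* Everything from gp_tasks to the tail blocks the current GP. */
	for (p = gp_tasks; p != &blkd_tasks; p = p->next)
		printf("%c blocks the current grace period\n",
		       p == &a ? 'A' : p == &b ? 'B' : 'C');
	return 0;	/* prints B, then A; C is not listed */
}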

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}
rcu: Fix grace-period-stall bug on large systems with CPU hotplug
When the last CPU of a given leaf rcu_node structure goes
offline, all of the tasks queued on that leaf rcu_node structure
(due to having blocked in their current RCU read-side critical
sections) are requeued onto the root rcu_node structure. This
requeuing is carried out by rcu_preempt_offline_tasks().
However, it is possible that these queued tasks are the only
thing preventing the leaf rcu_node structure from reporting a
quiescent state up the rcu_node hierarchy. Unfortunately, the
old code would fail to do this reporting, resulting in a
grace-period stall given the following sequence of events:
1. Kernel built for more than 32 CPUs on 32-bit systems or for more
than 64 CPUs on 64-bit systems, so that there is more than one
rcu_node structure. (Or CONFIG_RCU_FANOUT is artificially set
to a number smaller than CONFIG_NR_CPUS.)
2. The kernel is built with CONFIG_TREE_PREEMPT_RCU.
3. A task running on a CPU associated with a given leaf rcu_node
structure blocks while in an RCU read-side critical section
-and- that CPU has not yet passed through a quiescent state
for the current RCU grace period. This will cause the task
to be queued on the leaf rcu_node's blocked_tasks[] array, in
particular, on the element of this array corresponding to the
current grace period.
4. Each of the remaining CPUs corresponding to this same leaf rcu_node
structure passes through a quiescent state. However, the task is
still in its RCU read-side critical section, so these quiescent
states cannot be reported further up the rcu_node hierarchy.
Nevertheless, all bits in the leaf rcu_node structure's ->qsmask
field are now zero.
5. Each of the remaining CPUs goes offline. (The events in steps
#4 and #5 can happen in any order as long as each CPU passes
through a quiescent state before going offline.)
6. When the last CPU goes offline, __rcu_offline_cpu() will invoke
rcu_preempt_offline_tasks(), which will move the task to the
root rcu_node structure, but without reporting a quiescent state
up the rcu_node hierarchy (and this failure to report a quiescent
state is the bug).
But because this leaf rcu_node structure's ->qsmask field is
already zero and its ->blocked_tasks[] entries are all empty,
force_quiescent_state() will skip this rcu_node structure.
Therefore, grace periods are now hung.
This patch abstracts some code out of rcu_read_unlock_special(),
calling the result task_quiet() by analogy with cpu_quiet(), and
invokes task_quiet() from both rcu_read_unlock_special() and
__rcu_offline_cpu(). Invoking task_quiet() from
__rcu_offline_cpu() reports the quiescent state up the rcu_node
hierarchy, fixing the bug. This ends up requiring a separate
lock_class_key per level of the rcu_node hierarchy, which this
patch also provides.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12589088301770-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
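The upward reporting that this commit restores can be pictured with a toy model of the rcu_node hierarchy. The structure and report_qs() below are hypothetical simplifications (no locking, a single leaf); they only illustrate how a quiescent state stops propagating at any level that still has blocked readers or CPUs owing quiescent states, and otherwise climbs to the root.

#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long qsmask;	/* children still owing a quiescent state */
	unsigned long grpmask;	/* this node's bit in parent->qsmask */
	int blocked_readers;	/* tasks blocking the current GP here */
};

/* Clear @mask at @rnp, then keep climbing while nothing at the current
 * level still blocks the grace period. Toy model: locking omitted. */
static void report_qs(struct node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask || rnp->blocked_readers)
			return;			/* still blocked here */
		if (!rnp->parent) {
			printf("grace period may now end\n");
			return;
		}
		mask = rnp->grpmask;		/* clear our bit upstairs */
		rnp = rnp->parent;
	}
}

int main(void)
{
	struct node root = { .qsmask = 0x1 };	/* waiting on one leaf */
	struct node leaf = { .parent = &root, .grpmask = 0x1,
			     .blocked_readers = 1 };

	/* The last blocked task leaves its read-side critical section;
	 * the leaf no longer blocks the GP, so the report propagates.
	 * The bug above amounted to skipping this call when the leaf's
	 * tasks were instead moved away at CPU-offline time. */
	leaf.blocked_readers = 0;
	report_qs(&leaf, 0);
	return 0;
}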

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	bool drop_boost_mutex = false;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	union rcu_special special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so. Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (special.b.need_qs) {
		rcu_preempt_qs();
rcu: Clear need_qs flag to prevent splat
If the scheduling-clock interrupt sets the current task's need_qs flag,
but if the current CPU passes through a quiescent state in the meantime,
then rcu_preempt_qs() will fail to clear the need_qs flag, which can fool
RCU into thinking that additional rcu_read_unlock_special() processing
is needed. This commit therefore clears the need_qs flag before checking
for additional processing.
For this problem to occur, we need rcu_preempt_data.passed_quiesce equal
to true and current->rcu_read_unlock_special.b.need_qs also equal to true.
This condition can occur as follows:
1. CPU 0 is aware of the current preemptible RCU grace period,
but has not yet passed through a quiescent state. Among other
things, this means that rcu_preempt_data.passed_quiesce is false.
2. Task A running on CPU 0 enters a preemptible RCU read-side
critical section.
3. CPU 0 takes a scheduling-clock interrupt, which notices the
RCU read-side critical section and the need for a quiescent state,
and thus sets current->rcu_read_unlock_special.b.need_qs to true.
4. Task A is preempted, enters the scheduler, eventually invoking
rcu_preempt_note_context_switch() which in turn invokes
rcu_preempt_qs().
Because rcu_preempt_data.passed_quiesce is false,
control enters the body of the "if" statement, which sets
rcu_preempt_data.passed_quiesce to true.
5. At this point, CPU 0 takes an interrupt. The interrupt
handler contains an RCU read-side critical section, and
its rcu_read_unlock() notes that current->rcu_read_unlock_special
is nonzero, and thus invokes rcu_read_unlock_special().
6. Once in rcu_read_unlock_special(), the fact that
current->rcu_read_unlock_special.b.need_qs is true becomes
apparent, so rcu_read_unlock_special() invokes rcu_preempt_qs() --
recursively, given that the interrupt in the preceding step
arrived while rcu_preempt_qs() was still executing.
7. Because rcu_preempt_data.passed_quiesce is now true,
rcu_preempt_qs() does nothing, and simply returns.
8. Upon return to rcu_read_unlock_special(), it is noted that
current->rcu_read_unlock_special is still nonzero (because
the interrupted rcu_preempt_qs() had not yet gotten around
to clearing current->rcu_read_unlock_special.b.need_qs).
9. Execution proceeds to the WARN_ON_ONCE(), which notes that
we are in an interrupt handler and thus duly splats.
The solution, as noted above, is to make rcu_read_unlock_special()
clear out current->rcu_read_unlock_special.b.need_qs after calling
rcu_preempt_qs(). The interrupted rcu_preempt_qs() will clear it again,
but this is harmless. The worst that happens is that we clobber another
attempt to set this field, but this is not a problem because we just
got done reporting a quiescent state.
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Fix embarrassing build bug noted by Sasha Levin. ]
Tested-by: Sasha Levin <sasha.levin@oracle.com>
		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Hardware IRQ handlers cannot block, complain if they get here. */
	if (in_irq() || in_serving_softirq()) {
		lockdep_rcu_suspicious(__FILE__, __LINE__,
				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
		pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
			 t->rcu_read_unlock_special.s,
			 t->rcu_read_unlock_special.b.blocked,
			 t->rcu_read_unlock_special.b.need_qs);
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {
		t->rcu_read_unlock_special.b.blocked = false;

		/*
		 * Remove this task from the list it blocked on. The
		 * task can migrate while we acquire the lock, but at
		 * most one time. So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
			smp_mb__after_unlock_lock();
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(&rcu_preempt_state,
						  rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (drop_boost_mutex)
			rt_mutex_unlock(&rnp->boost_mtx);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}
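The for (;;) loop near the top of this function is an instance of a general pattern: snapshot a mutable pointer, lock the object it names, then revalidate the pointer under the lock. Below is a user-space sketch with pthreads; the types are hypothetical stand-ins, and the kernel instead uses raw_spin_lock() on rnp->lock with irqs already disabled.

#include <pthread.h>

struct node { pthread_mutex_t lock; };
struct task { struct node *blocked_node; };	/* may be retargeted */

/* Returns with node->lock held and t->blocked_node guaranteed stable. */
static struct node *lock_blocked_node(struct task *t)
{
	struct node *rnp;

	for (;;) {
		rnp = t->blocked_node;		/* snapshot the pointer */
		pthread_mutex_lock(&rnp->lock);
		if (rnp == t->blocked_node)
			return rnp;		/* still valid under lock */
		/* The task was requeued between the snapshot and the
		 * lock; drop the stale lock and retry. In the kernel
		 * case this happens at most once, because a blocked
		 * task migrates between rcu_node lists at most once. */
		pthread_mutex_unlock(&rnp->lock);
	}
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { &n };

	pthread_mutex_unlock(&lock_blocked_node(&t)->lock);
	return 0;
}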

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;
|
|
|
|
2011-02-08 04:47:15 +08:00
|
|
|
if (!rcu_preempt_blocked_readers_cgp(rnp))
|
2011-08-14 04:31:47 +08:00
|
|
|
return 0;
|
2012-01-17 05:29:10 +08:00
|
|
|
rcu_print_task_stall_begin(rnp);
|
2010-11-30 13:56:39 +08:00
|
|
|
t = list_entry(rnp->gp_tasks,
|
|
|
|
struct task_struct, rcu_node_entry);
|
2011-08-14 04:31:47 +08:00
|
|
|
list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
|
2013-03-19 07:24:11 +08:00
|
|
|
pr_cont(" P%d", t->pid);
|
2011-08-14 04:31:47 +08:00
|
|
|
ndetected++;
|
|
|
|
}
|
2012-01-17 05:29:10 +08:00
|
|
|
rcu_print_task_stall_end();
|
2011-08-14 04:31:47 +08:00
|
|
|
return ndetected;
|
rcu: Merge preemptable-RCU functionality into hierarchical RCU
Create a kernel/rcutree_plugin.h file that contains definitions
for preemptable RCU (or, under the #else branch of the #ifdef,
empty definitions for the classic non-preemptable semantics).
These definitions fit into plugins defined in kernel/rcutree.c
for this purpose.
This variant of preemptable RCU uses a new algorithm whose
read-side expense is roughly that of classic hierarchical RCU
under CONFIG_PREEMPT. This new algorithm's update-side expense
is similar to that of classic hierarchical RCU, and, in absence
of read-side preemption or blocking, is exactly that of classic
hierarchical RCU. Perhaps more important, this new algorithm
has a much simpler implementation, saving well over 1,000 lines
of code compared to mainline's implementation of preemptable
RCU, which will hopefully be retired in favor of this new
algorithm.
The simplifications are obtained by maintaining per-task
nesting state for running tasks, and using a simple
lock-protected algorithm to handle accounting when tasks block
within RCU read-side critical sections, making use of lessons
learned while creating numerous user-level RCU implementations
over the past 18 months.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <12509746134003-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-08-23 04:56:52 +08:00
|
|
|
}
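
/*
 * Illustrative note (not from this file): given the pr_cont(" P%d", ...)
 * format above, this function contributes a space-separated list of the
 * blocked readers' PIDs to the stall-warning console output, e.g.:
 *
 *       P2117 P2122 P2130
 *
 * The PID values shown are made up for illustration.
 */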

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
        if (rcu_preempt_has_tasks(rnp))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs();
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
            __this_cpu_read(rcu_preempt_data.qs_pending) &&
            !__this_cpu_read(rcu_preempt_data.passed_quiesce))
                t->rcu_read_unlock_special.b.need_qs = true;
}
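
/*
 * Sketch (not from this file): the read-side nesting counter consulted
 * above is maintained by the preemptible-RCU read-side primitives, whose
 * fast path is roughly just a counter increment, which is why a zero
 * count above cleanly means "not inside a reader":
 *
 *      void __rcu_read_lock(void)
 *      {
 *              current->rcu_read_lock_nesting++;
 *              barrier();  /+ critical section begins after this point +/
 *      }
 *
 * This is a simplified sketch for orientation, not a verbatim copy of
 * the actual implementation.
 */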

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
        rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
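
/*
 * Illustrative usage sketch (not part of this file): the canonical
 * call_rcu() pattern defers freeing until all pre-existing readers are
 * done.  "struct foo", foo_reclaim(), and fp are hypothetical names:
 *
 *      struct foo {
 *              struct rcu_head rh;
 *              int data;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rh)
 *      {
 *              kfree(container_of(rh, struct foo, rh));
 *      }
 *
 *      ...
 *      call_rcu(&fp->rh, foo_reclaim); /+ fp already unlinked +/
 */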

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
        if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
        else
                wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
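
/*
 * Illustrative usage sketch (not part of this file): a typical updater
 * unpublishes an element, waits for a grace period, then frees it, while
 * readers traverse under rcu_read_lock().  The names gp, gp_lock, and
 * do_something_with() are hypothetical:
 *
 *      /+ Reader: +/
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p)
 *              do_something_with(p->data);
 *      rcu_read_unlock();
 *
 *      /+ Updater: +/
 *      spin_lock(&gp_lock);
 *      p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *      rcu_assign_pointer(gp, NULL);
 *      spin_unlock(&gp_lock);
 *      synchronize_rcu();      /+ wait for pre-existing readers +/
 *      kfree(p);
 */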

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from an RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                wake_up(&sync_rcu_preempt_exp_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                smp_mb__after_unlock_lock();
                rnp->expmask &= ~mask;
        }
}
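
/*
 * Worked example (not from this file), assuming a two-level tree with
 * leaves A and B below root R, both leaves' bits set in R->expmask, and
 * no tasks queued on R itself:
 *
 *   1. Leaf A's last blocked reader exits.  A is now "done", so A's
 *      grpmask bit is cleared from R->expmask, leaving B's bit set; the
 *      loop then stops at R because sync_rcu_preempt_exp_done(R) is
 *      still false.
 *   2. Later, leaf B drains likewise; now R->expmask == 0 and R has no
 *      parent, so the waiter on sync_rcu_preempt_exp_wq is awakened
 *      (when wake is true).
 */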

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure, phase 1.  If there
 * are such tasks, set the ->expmask bits up the rcu_node tree and also
 * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
 * that work is needed here.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void
sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp_up;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
        WARN_ON_ONCE(rnp->expmask);
        WARN_ON_ONCE(rnp->exp_tasks);
        if (!rcu_preempt_has_tasks(rnp)) {
                /* No blocked tasks, nothing to do. */
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        /* Call for Phase 2 and propagate ->expmask bits up the tree. */
        rnp->expmask = 1;
        rnp_up = rnp;
        while (rnp_up->parent) {
                mask = rnp_up->grpmask;
                rnp_up = rnp_up->parent;
                if (rnp_up->expmask & mask)
                        break;
                raw_spin_lock(&rnp_up->lock); /* irqs already off */
                smp_mb__after_unlock_lock();
                rnp_up->expmask |= mask;
                raw_spin_unlock(&rnp_up->lock); /* irqs still off */
        }
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
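
/*
 * Worked example (not from this file): a leaf holding blocked readers
 * gets ->expmask = 1 as a marker for phase 2, then ORs its ->grpmask bit
 * into its parent's ->expmask, and so on toward the root.  If two leaves
 * share a parent, the second propagation pass finds the parent's bit
 * already set in the grandparent and stops early, so each upper-level
 * bit is set at most once per expedited grace period.
 */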

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure, phase 2.  If the
 * leaf rcu_node structure has its ->expmask field set, check for tasks.
 * If there are some, clear ->expmask and set ->exp_tasks accordingly,
 * then initiate RCU priority boosting.  Otherwise, clear ->expmask and
 * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
 * enabling rcu_read_unlock_special() to do the bit-clearing.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void
sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
        if (!rnp->expmask) {
                /* Phase 1 didn't do anything, so Phase 2 doesn't either. */
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }

        /* Phase 1 is over. */
        rnp->expmask = 0;

        /*
         * If there are still blocked tasks, set up ->exp_tasks so that
         * rcu_read_unlock_special() will wake us and then boost them.
         */
        if (rcu_preempt_has_tasks(rnp)) {
                rnp->exp_tasks = rnp->blkd_tasks.next;
                rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
                return;
        }

        /* No longer any blocked tasks, so undo bit setting. */
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rcu_report_exp_rnp(rsp, rnp, false);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for those lists to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 */
void synchronize_rcu_expedited(void)
{
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        unsigned long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Block CPU-hotplug operations.  This means that any CPU-hotplug
         * operation that finds an rcu_node structure with tasks in the
         * process of being boosted will know that all tasks blocking
         * this expedited grace period will already be in the process of
         * being boosted.  This simplifies the process of moving tasks
         * from leaf to root rcu_node structures.
         */
        if (!try_get_online_cpus()) {
                /* CPU-hotplug operation in flight, fall back to normal GP. */
                wait_rcu_gp(call_rcu);
                return;
        }

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (ULONG_CMP_LT(snap,
                    READ_ONCE(sync_rcu_preempt_exp_count))) {
                        put_online_cpus();
                        goto mb_ret; /* Others did our work for us. */
                }
                if (trycount++ < 10) {
                        udelay(trycount * num_online_cpus());
                } else {
                        put_online_cpus();
                        wait_rcu_gp(call_rcu);
                        return;
                }
        }
        if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
                put_online_cpus();
                goto unlock_mb_ret; /* Others did our work for us. */
        }

        /* Force all RCU readers onto ->blkd_tasks lists. */
        synchronize_sched_expedited();

        /*
         * Snapshot current state of ->blkd_tasks lists into ->expmask.
         * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
         * to start clearing them.  Doing this in one phase leads to
         * strange races between setting and clearing bits, so just say "no"!
         */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init1(rsp, rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init2(rsp, rnp);

        put_online_cpus();

        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* Ensure expedited GP seen before counter increment. */
        WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* Ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
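
/*
 * Illustrative sketch (not part of this file) of the batching advice in
 * the header comment above: rather than expediting every update, apply
 * the whole batch and wait once.  update_item() and items[] are
 * hypothetical:
 *
 *      /+ Anti-pattern: one expedited grace period per update. +/
 *      for (i = 0; i < n; i++) {
 *              update_item(&items[i]);
 *              synchronize_rcu_expedited();
 *      }
 *
 *      /+ Preferred: batch the updates, then one normal grace period. +/
 *      for (i = 0; i < n; i++)
 *              update_item(&items[i]);
 *      synchronize_rcu();
 */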

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
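
/*
 * Illustrative usage sketch (not part of this file): module-exit code
 * commonly invokes rcu_barrier() so that callbacks posted via call_rcu()
 * have finished running before the module text implementing them is
 * unloaded.  The names below are hypothetical:
 *
 *      static void __exit foo_exit(void)
 *      {
 *              unregister_foo();       /+ no new callbacks after this +/
 *              rcu_barrier();          /+ wait for outstanding callbacks +/
 *              kmem_cache_destroy(foo_cache);
 *      }
 */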

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (likely(list_empty(&current->rcu_node_entry)))
                return;
        t->rcu_read_lock_nesting = 1;
        barrier();
        t->rcu_read_unlock_special.b.blocked = true;
        __rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

static struct rcu_state *rcu_state_p = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        pr_info("Hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(void)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
        return false;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}
|
|
|
|
|

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);
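
/*
 * This mapping is sound because, without CONFIG_PREEMPT_RCU,
 * rcu_read_lock() reduces to preempt_disable(), so an RCU-sched grace
 * period necessarily waits for all pre-existing RCU readers.
 */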

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

/* Record the reason why rcu_initiate_boost() balked, for tracing. */
static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (!rcu_preempt_has_tasks(rnp))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;
	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}
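
/*
 * For orientation, the reader side of this trick (a sketch based on the
 * comment above, not code from this function): when the boosted task
 * exits its outermost RCU read-side critical section, it finds the
 * pointer left for it and does roughly
 *
 *	rt_mutex_unlock(&rnp->boost_mtx);
 *
 * thereby releasing the proxy lock and dropping back to its original
 * priority.
 */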

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
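
/*
 * In other words, boosting starts either immediately when some task is
 * blocking an expedited grace period, or once every CPU covered by this
 * rcu_node has passed through a quiescent state (->qsmask == 0), only
 * blocked readers remain, and the grace period is at least
 * RCU_BOOST_DELAY_JIFFIES old.
 */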

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
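
/*
 * Worked example (illustrative values, not defaults from any particular
 * config): with CONFIG_RCU_BOOST_DELAY=500 (milliseconds) and HZ=250,
 * RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 250, 1000) = 125, so
 * boosting is considered 125 jiffies (half a second) into the grace
 * period.
 */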

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __this_cpu_read(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}
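
/*
 * Thus each invocation handles up to ten rounds of callbacks; if work
 * is still streaming in after that, the kthread yields for a couple of
 * jiffies instead of monopolizing the CPU, then resumes in the WAITING
 * state.
 */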

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
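	/*
	 * Walk this rcu_node's span of CPUs, with bit 0 of @mask always
	 * tracking the current CPU.  Illustrative example: grplo == 0,
	 * grphi == 15, and mask == 0x5 would put CPUs 0 and 2 (if neither
	 * is the outgoing CPU) into the affinity set.
	 */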
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};
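
/*
 * Registering this descriptor (see rcu_spawn_boost_kthreads() below)
 * delegates per-CPU thread creation, hotplug park/unpark, and naming
 * ("rcuc/<cpu>") to the generic smpboot framework.
 */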

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rcu_for_each_leaf_node(rcu_state_p, rnp)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
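	/* In effect, ULONG_MAX tells the nohz code that RCU imposes no
	 * deadline at all on this CPU's next wakeup. */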
	return rcu_cpu_has_callbacks(NULL);
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}
|
|
|
|
|
2010-02-23 09:04:59 +08:00
|
|
|
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|
|
|
|
|
2011-12-01 07:41:14 +08:00
|
|
|
/*
|
|
|
|
* This code is invoked when a CPU goes idle, at which point we want
|
|
|
|
* to have the CPU do everything required for RCU so that it can enter
|
|
|
|
* the energy-efficient dyntick-idle mode. This is handled by a
|
|
|
|
* state machine implemented by rcu_prepare_for_idle() below.
|
|
|
|
*
|
|
|
|
* The following two preprocessor symbols control this state machine:
|
|
|
|
*
|
|
|
|
* RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
|
|
|
|
* to sleep in dyntick-idle mode with RCU callbacks pending. This
|
|
|
|
* is sized to be roughly one RCU grace period. Those energy-efficiency
|
|
|
|
* benchmarkers who might otherwise be tempted to set this to a large
|
|
|
|
* number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
|
|
|
|
* system. And if you are -that- concerned about energy efficiency,
|
|
|
|
* just power the system down and be done with it!
|
2012-01-11 06:13:24 +08:00
|
|
|
* RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
|
|
|
|
* permitted to sleep in dyntick-idle mode with only lazy RCU
|
|
|
|
* callbacks pending. Setting this too high can OOM your system.
|
2011-12-01 07:41:14 +08:00
|
|
|
*
|
|
|
|
* The values below work well in practice. If future workloads require
|
|
|
|
* adjustment, they can be converted into kernel config parameters, though
|
|
|
|
* making the state machine smarter might be a better option.
|
|
|
|
*/
|
2012-06-05 11:45:10 +08:00
|
|
|
#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
|
2012-01-11 06:13:24 +08:00
|
|
|
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
|
2011-12-01 07:41:14 +08:00
|
|
|
|
2012-12-13 04:35:29 +08:00
|
|
|
static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
|
|
|
|
module_param(rcu_idle_gp_delay, int, 0644);
|
|
|
|
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
|
|
|
|
module_param(rcu_idle_lazy_gp_delay, int, 0644);
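Because both knobs are module_params with mode 0644, they can also be tuned at runtime; for example (the path follows the usual module_param conventions, so the exact directory depends on how this file is built in): echo 8 > /sys/module/rcutree/parameters/rcu_idle_gp_delay.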
|
2012-01-07 06:11:30 +08:00
|
|
|
|
2013-11-14 04:01:57 +08:00
|
|
|
extern int tick_nohz_active;
|
2012-01-07 06:11:30 +08:00
|
|
|
|
|
|
|
/*
|
2013-08-26 12:20:47 +08:00
|
|
|
* Try to advance callbacks for all flavors of RCU on the current CPU, but
|
|
|
|
* only if it has been a while since the last time we did so. Afterwards,
|
|
|
|
* if there are any callbacks ready for immediate invocation, return true.
|
2012-01-07 06:11:30 +08:00
|
|
|
*/
|
2013-11-18 13:08:07 +08:00
|
|
|
static bool __maybe_unused rcu_try_advance_all_cbs(void)
|
2012-01-07 06:11:30 +08:00
|
|
|
{
|
2012-12-29 03:30:36 +08:00
|
|
|
bool cbs_ready = false;
|
|
|
|
struct rcu_data *rdp;
|
2013-08-26 12:20:47 +08:00
|
|
|
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
2012-12-29 03:30:36 +08:00
|
|
|
struct rcu_node *rnp;
|
|
|
|
struct rcu_state *rsp;
|
2012-01-07 06:11:30 +08:00
|
|
|
|
2013-08-26 12:20:47 +08:00
|
|
|
/* Exit early if we advanced recently. */
|
|
|
|
if (jiffies == rdtp->last_advance_all)
|
2014-07-09 06:26:13 +08:00
|
|
|
return false;
|
2013-08-26 12:20:47 +08:00
|
|
|
rdtp->last_advance_all = jiffies;
|
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
for_each_rcu_flavor(rsp) {
|
|
|
|
rdp = this_cpu_ptr(rsp->rda);
|
|
|
|
rnp = rdp->mynode;
|
2012-01-07 06:11:30 +08:00
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
/*
|
|
|
|
* Don't bother checking unless a grace period has
|
|
|
|
* completed since we last checked and there are
|
|
|
|
* callbacks not yet ready to invoke.
|
|
|
|
*/
|
rcu: Handle gpnum/completed wrap while dyntick idle
Subtle race conditions can result if a CPU stays in dyntick-idle mode
long enough for the ->gpnum and ->completed fields to wrap. For
example, consider the following sequence of events:
o CPU 1 encounters a quiescent state while waiting for grace period
5 to complete, but then enters dyntick-idle mode.
o While CPU 1 is in dyntick-idle mode, the grace-period counters
wrap around so that the grace period number is now 4.
o Just as CPU 1 exits dyntick-idle mode, grace period 4 completes
and grace period 5 begins.
o The quiescent state that CPU 1 passed through during the old
grace period 5 looks like it applies to the new grace period
5. Therefore, the new grace period 5 completes without CPU 1
having passed through a quiescent state.
This could clearly be a fatal surprise to any long-running RCU read-side
critical section that happened to be running on CPU 1 at the time. At one
time, this was not a problem, given that it takes significant time for
the grace-period counters to overflow even on 32-bit systems. However,
with the advent of NO_HZ_FULL and SMP embedded systems, arbitrarily long
idle periods are now becoming quite feasible. It is therefore time to
close this race.
This commit therefore avoids this race condition by having the
quiescent-state forcing code detect when a CPU is falling too far
behind, and setting a new rcu_data field ->gpwrap when this happens.
Whenever this new ->gpwrap field is set, the CPU's ->gpnum and ->completed
fields are known to be untrustworthy, and can be ignored, along with
any associated quiescent states.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
2014-12-09 12:26:55 +08:00
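A minimal sketch of the ->gpwrap idea follows (the threshold and helper name are assumptions; ->gpnum and ->gpwrap are the fields named above):

/* If this CPU's grace-period snapshot has fallen a quarter of the
 * counter space behind, wraparound could make stale numbers look
 * current, so flag this CPU's state as untrustworthy. */
static void rcu_gpnum_ovf_sketch(unsigned long rnp_gpnum, struct rcu_data *rdp)
{
	if (ULONG_CMP_LT(rdp->gpnum + ULONG_MAX / 4, rnp_gpnum))
		WRITE_ONCE(rdp->gpwrap, true);
}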
|
|
|
if ((rdp->completed != rnp->completed ||
|
2015-03-04 06:57:58 +08:00
|
|
|
unlikely(READ_ONCE(rdp->gpwrap))) &&
|
2012-12-29 03:30:36 +08:00
|
|
|
rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
|
2013-03-20 02:32:11 +08:00
|
|
|
note_gp_changes(rsp, rdp);
|
2012-01-07 06:11:30 +08:00
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
if (cpu_has_callbacks_ready_to_invoke(rdp))
|
|
|
|
cbs_ready = true;
|
|
|
|
}
|
|
|
|
return cbs_ready;
|
2012-01-07 06:11:30 +08:00
|
|
|
}
|
|
|
|
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
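A sketch of how the estimate is meant to be consumed (function name and shape are assumptions; the real tick_nohz_stop_sched_tick() logic is more involved):

static unsigned long next_idle_wakeup_sketch(unsigned long timer_wheel_dj)
{
	unsigned long rcu_dj;

	if (rcu_needs_cpu(&rcu_dj))
		return 0;	/* RCU needs the CPU now; keep the tick running. */
	return min(timer_wheel_dj, rcu_dj);	/* honor the earlier deadline */
}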
|
|
|
/*
|
2012-12-29 03:30:36 +08:00
|
|
|
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
|
|
|
|
* to invoke. If the CPU has callbacks, try to advance them. Tell the
|
|
|
|
* caller to set the timeout based on whether or not there are non-lazy
|
|
|
|
* callbacks.
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
*
|
2012-12-29 03:30:36 +08:00
|
|
|
* The caller must have disabled interrupts.
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
*/
|
2013-11-18 11:27:16 +08:00
|
|
|
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
2014-10-22 04:23:08 +08:00
|
|
|
int rcu_needs_cpu(unsigned long *dj)
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
{
|
2014-10-22 04:23:08 +08:00
|
|
|
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
/* Snapshot to detect later posting of non-lazy callback. */
|
|
|
|
rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
|
|
|
|
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
/* If no callbacks, RCU doesn't need the CPU. */
|
2014-10-22 04:23:08 +08:00
|
|
|
if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
|
2012-12-29 03:30:36 +08:00
|
|
|
*dj = ULONG_MAX;
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2012-12-29 03:30:36 +08:00
|
|
|
|
|
|
|
/* Attempt to advance callbacks. */
|
|
|
|
if (rcu_try_advance_all_cbs()) {
|
|
|
|
/* Some ready to invoke, so initiate later invocation. */
|
|
|
|
invoke_rcu_core();
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
return 1;
|
|
|
|
}
|
2012-12-29 03:30:36 +08:00
|
|
|
rdtp->last_accelerate = jiffies;
|
|
|
|
|
|
|
|
/* Request timer delay depending on laziness, and round. */
|
2013-05-13 21:53:37 +08:00
|
|
|
if (!rdtp->all_lazy) {
|
2012-12-29 03:30:36 +08:00
|
|
|
*dj = round_up(rcu_idle_gp_delay + jiffies,
|
|
|
|
rcu_idle_gp_delay) - jiffies;
|
2012-06-05 11:45:10 +08:00
|
|
|
} else {
|
2012-12-29 03:30:36 +08:00
|
|
|
*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
|
2012-06-05 11:45:10 +08:00
|
|
|
}
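/*
 * Worked example with hypothetical values: if rcu_idle_gp_delay == 4
 * and jiffies == 1003, round_up(1003 + 4, 4) == 1008, so *dj == 5.
 * Rounding onto a common boundary batches the wakeups of multiple idle
 * CPUs onto the same jiffy, which is friendlier to power management.
 */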
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2013-11-18 11:27:16 +08:00
|
|
|
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
rcu: Precompute RCU_FAST_NO_HZ timer offsets
When a CPU is entering dyntick-idle mode, tick_nohz_stop_sched_tick()
calls rcu_needs_cpu() to see if RCU needs that CPU, and, if not, computes the
next wakeup time based on the timer wheels. Only later, when actually
entering the idle loop, rcu_prepare_for_idle() will be invoked. In some
cases, rcu_prepare_for_idle() will post timers to wake the CPU back up.
But all for naught: The next wakeup time for the CPU has already been
computed, and posting a timer afterwards does not force that wakeup
time to be recomputed. This means that rcu_prepare_for_idle()'s timers
have no effect.
This is not a problem on a busy system because something else will wake
up the CPU soon enough. However, on lightly loaded systems, the CPU
might stay asleep for a considerable length of time. If that CPU has
a callback that the rest of the system is waiting on, the system might
run very slowly or (in theory) even hang.
This commit avoids this problem by having rcu_needs_cpu() give
tick_nohz_stop_sched_tick() an estimate of when RCU will need the CPU
to wake back up, which tick_nohz_stop_sched_tick() takes into account
when programming the CPU's wakeup time. An alternative approach is
for rcu_prepare_for_idle() to use hrtimers instead of normal timers,
but timers are much more efficient than are hrtimers for frequently
and repeatedly posting and cancelling a given timer, which is exactly
what RCU_FAST_NO_HZ does.
Reported-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
2012-05-11 07:41:44 +08:00
|
|
|
|
2012-05-01 05:16:19 +08:00
|
|
|
/*
|
2012-12-29 03:30:36 +08:00
|
|
|
* Prepare a CPU for idle from an RCU perspective. The first major task
|
|
|
|
* is to sense whether nohz mode has been enabled or disabled via sysfs.
|
|
|
|
* The second major task is to check to see if a non-lazy callback has
|
|
|
|
* arrived at a CPU that previously had only lazy callbacks. The third
|
|
|
|
* major task is to accelerate (that is, assign grace-period numbers to)
|
|
|
|
* any recently arrived callbacks.
|
2011-11-02 21:54:54 +08:00
|
|
|
*
|
|
|
|
* The caller must have disabled interrupts.
|
2010-02-23 09:04:59 +08:00
|
|
|
*/
|
2014-10-23 06:03:43 +08:00
|
|
|
static void rcu_prepare_for_idle(void)
|
2010-02-23 09:04:59 +08:00
|
|
|
{
|
2013-11-18 13:08:07 +08:00
|
|
|
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
rcu: Make callers awaken grace-period kthread
The rcu_start_gp_advanced() function currently uses irq_work_queue()
to defer wakeups of the RCU grace-period kthread. This deferring
is necessary to avoid RCU-scheduler deadlocks involving the rcu_node
structure's lock, meaning that RCU cannot call any of the scheduler's
wake-up functions while holding one of these locks.
Unfortunately, the second and subsequent calls to irq_work_queue() are
ignored, and the first call will be ignored (aside from queuing the work
item) if the scheduler-clock tick is turned off. This is OK for many
uses, especially those where irq_work_queue() is called from an interrupt
or softirq handler, because in those cases the scheduler-clock-tick state
will be re-evaluated, which will turn the scheduler-clock tick back on.
On the next tick, any deferred work will then be processed.
However, this strategy does not always work for RCU, which can be invoked
at process level from idle CPUs. In this case, the tick might never
be turned back on, indefinitely deferring a grace-period start request.
Note that the RCU CPU stall detector cannot see this condition, because
there is no RCU grace period in progress. Therefore, we can (and do!)
see long tens-of-seconds stalls in grace-period handling. In theory,
we could see a full grace-period hang, but rcutorture testing to date
has seen only the tens-of-seconds stalls. Event tracing demonstrates
that irq_work_queue() is being called repeatedly to no effect during
these stalls: The "newreq" event appears repeatedly from a task that is
not one of the grace-period kthreads.
In theory, irq_work_queue() might be fixed to avoid this sort of issue,
but RCU's requirements are unusual and it is quite straightforward to pass
wake-up responsibility up through RCU's call chain, so that the wakeup
happens when the offending locks are released.
This commit therefore makes this change. The rcu_start_gp_advanced(),
rcu_start_future_gp(), rcu_accelerate_cbs(), rcu_advance_cbs(),
__note_gp_changes(), and rcu_start_gp() functions now return a boolean
which indicates when a wake-up is needed. A new rcu_gp_kthread_wake()
does the wakeup when it is necessary and safe to do so: No self-wakes,
no wake-ups if the ->gp_flags field indicates there is no need (as in
someone else did the wake-up before we got around to it), and no wake-ups
before the grace-period kthread has been created.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
2014-03-12 04:02:16 +08:00
|
|
|
bool needwake;
|
2012-12-29 03:30:36 +08:00
|
|
|
struct rcu_data *rdp;
|
2014-10-23 06:03:43 +08:00
|
|
|
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
2012-12-29 03:30:36 +08:00
|
|
|
struct rcu_node *rnp;
|
|
|
|
struct rcu_state *rsp;
|
2012-06-25 01:15:02 +08:00
|
|
|
int tne;
|
|
|
|
|
|
|
|
/* Handle nohz enablement switches conservatively. */
|
2015-03-04 06:57:58 +08:00
|
|
|
tne = READ_ONCE(tick_nohz_active);
|
2012-06-25 01:15:02 +08:00
|
|
|
if (tne != rdtp->tick_nohz_enabled_snap) {
|
2014-10-22 04:23:08 +08:00
|
|
|
if (rcu_cpu_has_callbacks(NULL))
|
2012-06-25 01:15:02 +08:00
|
|
|
invoke_rcu_core(); /* force nohz to see update. */
|
|
|
|
rdtp->tick_nohz_enabled_snap = tne;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!tne)
|
|
|
|
return;
|
2012-03-16 03:16:26 +08:00
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
/* If this is a no-CBs CPU, no callbacks, just return. */
|
2014-10-23 06:03:43 +08:00
|
|
|
if (rcu_is_nocb_cpu(smp_processor_id()))
|
2012-06-29 03:33:51 +08:00
|
|
|
return;
|
|
|
|
|
2012-02-29 03:02:21 +08:00
|
|
|
/*
|
2012-12-29 03:30:36 +08:00
|
|
|
* If a non-lazy callback arrived at a CPU having only lazy
|
|
|
|
* callbacks, invoke RCU core for the side-effect of recalculating
|
|
|
|
* idle duration on re-entry to idle.
|
2012-02-29 03:02:21 +08:00
|
|
|
*/
|
2012-12-29 03:30:36 +08:00
|
|
|
if (rdtp->all_lazy &&
|
|
|
|
rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
|
2013-09-06 08:02:11 +08:00
|
|
|
rdtp->all_lazy = false;
|
|
|
|
rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
|
2012-12-29 03:30:36 +08:00
|
|
|
invoke_rcu_core();
|
2012-02-29 03:02:21 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-11-23 09:07:11 +08:00
|
|
|
/*
|
2012-12-29 03:30:36 +08:00
|
|
|
* If we have not yet accelerated this jiffy, accelerate all
|
|
|
|
* callbacks on this CPU.
|
2011-11-23 09:07:11 +08:00
|
|
|
*/
|
2012-12-29 03:30:36 +08:00
|
|
|
if (rdtp->last_accelerate == jiffies)
|
2011-11-02 21:54:54 +08:00
|
|
|
return;
|
2012-12-29 03:30:36 +08:00
|
|
|
rdtp->last_accelerate = jiffies;
|
|
|
|
for_each_rcu_flavor(rsp) {
|
2014-10-23 06:03:43 +08:00
|
|
|
rdp = this_cpu_ptr(rsp->rda);
|
2012-12-29 03:30:36 +08:00
|
|
|
if (!*rdp->nxttail[RCU_DONE_TAIL])
|
|
|
|
continue;
|
|
|
|
rnp = rdp->mynode;
|
|
|
|
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
|
2013-12-12 05:59:10 +08:00
|
|
|
smp_mb__after_unlock_lock();
|
rcu: Make callers awaken grace-period kthread
The rcu_start_gp_advanced() function currently uses irq_work_queue()
to defer wakeups of the RCU grace-period kthread. This deferring
is necessary to avoid RCU-scheduler deadlocks involving the rcu_node
structure's lock, meaning that RCU cannot call any of the scheduler's
wake-up functions while holding one of these locks.
Unfortunately, the second and subsequent calls to irq_work_queue() are
ignored, and the first call will be ignored (aside from queuing the work
item) if the scheduler-clock tick is turned off. This is OK for many
uses, especially those where irq_work_queue() is called from an interrupt
or softirq handler, because in those cases the scheduler-clock-tick state
will be re-evaluated, which will turn the scheduler-clock tick back on.
On the next tick, any deferred work will then be processed.
However, this strategy does not always work for RCU, which can be invoked
at process level from idle CPUs. In this case, the tick might never
be turned back on, indefinitely deferring a grace-period start request.
Note that the RCU CPU stall detector cannot see this condition, because
there is no RCU grace period in progress. Therefore, we can (and do!)
see long tens-of-seconds stalls in grace-period handling. In theory,
we could see a full grace-period hang, but rcutorture testing to date
has seen only the tens-of-seconds stalls. Event tracing demonstrates
that irq_work_queue() is being called repeatedly to no effect during
these stalls: The "newreq" event appears repeatedly from a task that is
not one of the grace-period kthreads.
In theory, irq_work_queue() might be fixed to avoid this sort of issue,
but RCU's requirements are unusual and it is quite straightforward to pass
wake-up responsibility up through RCU's call chain, so that the wakeup
happens when the offending locks are released.
This commit therefore makes this change. The rcu_start_gp_advanced(),
rcu_start_future_gp(), rcu_accelerate_cbs(), rcu_advance_cbs(),
__note_gp_changes(), and rcu_start_gp() functions now return a boolean
which indicates when a wake-up is needed. A new rcu_gp_kthread_wake()
does the wakeup when it is necessary and safe to do so: No self-wakes,
no wake-ups if the ->gp_flags field indicates there is no need (as in
someone else did the wake-up before we got around to it), and no wake-ups
before the grace-period kthread has been created.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
2014-03-12 04:02:16 +08:00
|
|
|
needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
|
2012-12-29 03:30:36 +08:00
|
|
|
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
|
rcu: Make callers awaken grace-period kthread
The rcu_start_gp_advanced() function currently uses irq_work_queue()
to defer wakeups of the RCU grace-period kthread. This deferring
is necessary to avoid RCU-scheduler deadlocks involving the rcu_node
structure's lock, meaning that RCU cannot call any of the scheduler's
wake-up functions while holding one of these locks.
Unfortunately, the second and subsequent calls to irq_work_queue() are
ignored, and the first call will be ignored (aside from queuing the work
item) if the scheduler-clock tick is turned off. This is OK for many
uses, especially those where irq_work_queue() is called from an interrupt
or softirq handler, because in those cases the scheduler-clock-tick state
will be re-evaluated, which will turn the scheduler-clock tick back on.
On the next tick, any deferred work will then be processed.
However, this strategy does not always work for RCU, which can be invoked
at process level from idle CPUs. In this case, the tick might never
be turned back on, indefinitely deferring a grace-period start request.
Note that the RCU CPU stall detector cannot see this condition, because
there is no RCU grace period in progress. Therefore, we can (and do!)
see long tens-of-seconds stalls in grace-period handling. In theory,
we could see a full grace-period hang, but rcutorture testing to date
has seen only the tens-of-seconds stalls. Event tracing demonstrates
that irq_work_queue() is being called repeatedly to no effect during
these stalls: The "newreq" event appears repeatedly from a task that is
not one of the grace-period kthreads.
In theory, irq_work_queue() might be fixed to avoid this sort of issue,
but RCU's requirements are unusual and it is quite straightforward to pass
wake-up responsibility up through RCU's call chain, so that the wakeup
happens when the offending locks are released.
This commit therefore makes this change. The rcu_start_gp_advanced(),
rcu_start_future_gp(), rcu_accelerate_cbs(), rcu_advance_cbs(),
__note_gp_changes(), and rcu_start_gp() functions now return a boolean
which indicates when a wake-up is needed. A new rcu_gp_kthread_wake()
does the wakeup when it is necessary and safe to do so: No self-wakes,
no wake-ups if the ->gp_flags field indicates there is no need (as in
someone else did the wake-up before we got around to it), and no wake-ups
before the grace-period kthread has been created.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
2014-03-12 04:02:16 +08:00
|
|
|
if (needwake)
|
|
|
|
rcu_gp_kthread_wake(rsp);
|
2010-04-26 12:04:29 +08:00
|
|
|
}
|
2013-11-18 13:08:07 +08:00
|
|
|
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
2012-12-29 03:30:36 +08:00
|
|
|
}
|
2011-11-23 09:07:11 +08:00
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
/*
|
|
|
|
* Clean up for exit from idle. Attempt to advance callbacks based on
|
|
|
|
* any grace periods that elapsed while the CPU was idle, and if any
|
|
|
|
* callbacks are now ready to invoke, initiate invocation.
|
|
|
|
*/
|
2014-10-23 06:07:37 +08:00
|
|
|
static void rcu_cleanup_after_idle(void)
|
2012-12-29 03:30:36 +08:00
|
|
|
{
|
2013-11-18 13:08:07 +08:00
|
|
|
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
2014-10-23 06:07:37 +08:00
|
|
|
if (rcu_is_nocb_cpu(smp_processor_id()))
|
2011-11-02 21:54:54 +08:00
|
|
|
return;
|
2013-08-23 09:16:16 +08:00
|
|
|
if (rcu_try_advance_all_cbs())
|
|
|
|
invoke_rcu_core();
|
2013-11-18 13:08:07 +08:00
|
|
|
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
2010-02-23 09:04:59 +08:00
|
|
|
}
|
|
|
|
|
2012-02-29 03:02:21 +08:00
|
|
|
/*
|
2012-05-04 06:38:10 +08:00
|
|
|
* Keep a running count of the number of non-lazy callbacks posted
|
|
|
|
* on this CPU. This running counter (which is never decremented) allows
|
|
|
|
* rcu_prepare_for_idle() to detect when something out of the idle loop
|
|
|
|
* posts a callback, even if an equal number of callbacks are invoked.
|
|
|
|
* Of course, callbacks should only be posted from within a trace event
|
|
|
|
* designed to be called from idle or from within RCU_NONIDLE().
|
2012-02-29 03:02:21 +08:00
|
|
|
*/
|
|
|
|
static void rcu_idle_count_callbacks_posted(void)
|
|
|
|
{
|
2012-05-10 03:07:05 +08:00
|
|
|
__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
|
2012-02-29 03:02:21 +08:00
|
|
|
}
|
|
|
|
|
2012-06-12 08:39:43 +08:00
|
|
|
/*
|
|
|
|
* Data for flushing lazy RCU callbacks at OOM time.
|
|
|
|
*/
|
|
|
|
static atomic_t oom_callback_count;
|
|
|
|
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RCU OOM callback -- decrement the outstanding count and deliver the
|
|
|
|
* wake-up if we are the last one.
|
|
|
|
*/
|
|
|
|
static void rcu_oom_callback(struct rcu_head *rhp)
|
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&oom_callback_count))
|
|
|
|
wake_up(&oom_callback_wq);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Post an rcu_oom_notify callback on the current CPU if it has at
|
|
|
|
* least one lazy callback. This will unnecessarily post callbacks
|
|
|
|
* to CPUs that already have a non-lazy callback at the end of their
|
|
|
|
* callback list, but this is an infrequent operation, so accept some
|
|
|
|
* extra overhead to keep things simple.
|
|
|
|
*/
|
|
|
|
static void rcu_oom_notify_cpu(void *unused)
|
|
|
|
{
|
|
|
|
struct rcu_state *rsp;
|
|
|
|
struct rcu_data *rdp;
|
|
|
|
|
|
|
|
for_each_rcu_flavor(rsp) {
|
2014-04-16 03:20:12 +08:00
|
|
|
rdp = raw_cpu_ptr(rsp->rda);
|
2012-06-12 08:39:43 +08:00
|
|
|
if (rdp->qlen_lazy != 0) {
|
|
|
|
atomic_inc(&oom_callback_count);
|
|
|
|
rsp->call(&rdp->oom_head, rcu_oom_callback);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If low on memory, ensure that each CPU has a non-lazy callback.
|
|
|
|
* This will wake up CPUs that have only lazy callbacks, in turn
|
|
|
|
* ensuring that they free up the corresponding memory in a timely manner.
|
|
|
|
* Because an uncertain amount of memory will be freed in some uncertain
|
|
|
|
* timeframe, we do not claim to have freed anything.
|
|
|
|
*/
|
|
|
|
static int rcu_oom_notify(struct notifier_block *self,
|
|
|
|
unsigned long notused, void *nfreed)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
/* Wait for callbacks from earlier instance to complete. */
|
|
|
|
wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
|
2013-09-25 06:04:06 +08:00
|
|
|
smp_mb(); /* Ensure callback reuse happens after callback invocation. */
|
2012-06-12 08:39:43 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prevent premature wakeup: ensure that all increments happen
|
|
|
|
* before there is a chance of the counter reaching zero.
|
|
|
|
*/
|
|
|
|
atomic_set(&oom_callback_count, 1);
|
|
|
|
|
|
|
|
get_online_cpus();
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
|
2014-07-02 02:26:57 +08:00
|
|
|
cond_resched_rcu_qs();
|
2012-06-12 08:39:43 +08:00
|
|
|
}
|
|
|
|
put_online_cpus();
|
|
|
|
|
|
|
|
/* Unconditionally decrement: no need to wake ourselves up. */
|
|
|
|
atomic_dec(&oom_callback_count);
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
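As a worked example (counts hypothetical): with two CPUs each holding lazy callbacks, oom_callback_count goes 1 -> 2 -> 3 while the callbacks are posted, the unconditional decrement drops it to 2, and the two rcu_oom_callback() invocations take it to 1 and then 0, at which point the last one wakes oom_callback_wq for the next rcu_oom_notify() instance.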
|
|
|
|
|
|
|
|
static struct notifier_block rcu_oom_nb = {
|
|
|
|
.notifier_call = rcu_oom_notify
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init rcu_register_oom_notifier(void)
|
|
|
|
{
|
|
|
|
register_oom_notifier(&rcu_oom_nb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_initcall(rcu_register_oom_notifier);
|
|
|
|
|
2010-02-23 09:04:59 +08:00
|
|
|
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|
2012-01-17 05:29:10 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_RCU_CPU_STALL_INFO
|
|
|
|
|
|
|
|
#ifdef CONFIG_RCU_FAST_NO_HZ
|
|
|
|
|
|
|
|
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
|
|
|
|
{
|
2012-05-10 03:07:05 +08:00
|
|
|
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
|
2012-12-29 03:30:36 +08:00
|
|
|
unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
|
2012-01-17 05:29:10 +08:00
|
|
|
|
2012-12-29 03:30:36 +08:00
|
|
|
sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
|
|
|
|
rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
|
|
|
|
ulong2long(nlpd),
|
|
|
|
rdtp->all_lazy ? 'L' : '.',
|
|
|
|
rdtp->tick_nohz_enabled_snap ? '.' : 'D');
|
2012-01-17 05:29:10 +08:00
|
|
|
}
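For illustration (values hypothetical), the resulting string might read "last_accelerate: 3ee8/4013, nonlazy_posted: 25, .D": the CPU last accelerated callbacks at jiffies 0x3ee8 (it is now 0x4013), 25 non-lazy callbacks have been posted since the snapshot, at least one queued callback is non-lazy ('.' rather than 'L'), and the nohz snapshot shows dyntick mode disabled ('D').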
|
|
|
|
|
|
|
|
#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
|
|
|
|
|
|
|
|
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
|
|
|
|
{
|
2012-06-19 16:43:16 +08:00
|
|
|
*cp = '\0';
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
|
|
|
|
|
|
|
|
/* Initiate the stall-info list. */
|
|
|
|
static void print_cpu_stall_info_begin(void)
|
|
|
|
{
|
2013-03-19 07:24:11 +08:00
|
|
|
pr_cont("\n");
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print out diagnostic information for the specified stalled CPU.
|
|
|
|
*
|
|
|
|
* If the specified CPU is aware of the current RCU grace period
|
|
|
|
* (flavor specified by rsp), then print the number of scheduling
|
|
|
|
* clock interrupts the CPU has taken during the time that it has
|
|
|
|
* been aware. Otherwise, print the number of RCU grace periods
|
|
|
|
* that this CPU is ignorant of, for example, "1" if the CPU was
|
|
|
|
* aware of the previous grace period.
|
|
|
|
*
|
|
|
|
* Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
|
|
|
|
*/
|
|
|
|
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
|
|
|
|
{
|
|
|
|
char fast_no_hz[72];
|
|
|
|
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
|
|
|
struct rcu_dynticks *rdtp = rdp->dynticks;
|
|
|
|
char *ticks_title;
|
|
|
|
unsigned long ticks_value;
|
|
|
|
|
|
|
|
if (rsp->gpnum == rdp->gpnum) {
|
|
|
|
ticks_title = "ticks this GP";
|
|
|
|
ticks_value = rdp->ticks_this_gp;
|
|
|
|
} else {
|
|
|
|
ticks_title = "GPs behind";
|
|
|
|
ticks_value = rsp->gpnum - rdp->gpnum;
|
|
|
|
}
|
|
|
|
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
|
2014-12-09 01:57:48 +08:00
|
|
|
pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
|
2012-01-17 05:29:10 +08:00
|
|
|
cpu, ticks_value, ticks_title,
|
|
|
|
atomic_read(&rdtp->dynticks) & 0xfff,
|
|
|
|
rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
|
2013-03-07 05:37:09 +08:00
|
|
|
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
|
2015-03-04 06:57:58 +08:00
|
|
|
READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
|
2012-01-17 05:29:10 +08:00
|
|
|
fast_no_hz);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Terminate the stall-info list. */
|
|
|
|
static void print_cpu_stall_info_end(void)
|
|
|
|
{
|
2013-03-19 07:24:11 +08:00
|
|
|
pr_err("\t");
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Zero ->ticks_this_gp for all flavors of RCU. */
|
|
|
|
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
rdp->ticks_this_gp = 0;
|
2013-03-07 05:37:09 +08:00
|
|
|
rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Increment ->ticks_this_gp for all flavors of RCU. */
|
|
|
|
static void increment_cpu_stall_ticks(void)
|
|
|
|
{
|
2012-08-11 04:55:03 +08:00
|
|
|
struct rcu_state *rsp;
|
|
|
|
|
|
|
|
for_each_rcu_flavor(rsp)
|
2014-04-16 03:20:12 +08:00
|
|
|
raw_cpu_inc(rsp->rda->ticks_this_gp);
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
|
|
|
|
|
|
|
|
static void print_cpu_stall_info_begin(void)
|
|
|
|
{
|
2013-03-19 07:24:11 +08:00
|
|
|
pr_cont(" {");
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
|
|
|
|
{
|
2013-03-19 07:24:11 +08:00
|
|
|
pr_cont(" %d", cpu);
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_cpu_stall_info_end(void)
|
|
|
|
{
|
2013-03-19 07:24:11 +08:00
|
|
|
pr_cont("} ");
|
2012-01-17 05:29:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void increment_cpu_stall_ticks(void)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
|
2012-08-20 12:35:53 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_RCU_NOCB_CPU
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Offload callback processing from the set of CPUs specified at boot
|
|
|
|
* time by rcu_nocb_mask. For each CPU in the set, there is a
|
|
|
|
* kthread created that pulls the callbacks from the corresponding CPU,
|
|
|
|
* waits for a grace period to elapse, and invokes the callbacks.
|
|
|
|
* The no-CBs CPUs do a wake_up() on their kthread when they insert
|
|
|
|
* a callback into any empty list, unless the rcu_nocb_poll boot parameter
|
|
|
|
* has been specified, in which case each kthread actively polls its
|
|
|
|
* CPU. (Which isn't so great for energy efficiency, but which does
|
|
|
|
* reduce RCU's overhead on that CPU.)
|
|
|
|
*
|
|
|
|
* This is intended to be used in conjunction with Frederic Weisbecker's
|
|
|
|
* adaptive-idle work, which would seriously reduce OS jitter on CPUs
|
|
|
|
* running CPU-bound user-mode computations.
|
|
|
|
*
|
|
|
|
* Offloading of callback processing could also in theory be used as
|
|
|
|
* an energy-efficiency measure because CPUs with no RCU callbacks
|
|
|
|
* queued are more aggressive about entering dyntick-idle mode.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
|
|
|
|
static int __init rcu_nocb_setup(char *str)
|
|
|
|
{
|
|
|
|
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
|
|
|
|
have_rcu_nocb_mask = true;
|
|
|
|
cpulist_parse(str, rcu_nocb_mask);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
__setup("rcu_nocbs=", rcu_nocb_setup);
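For example (CPU numbers hypothetical), booting with rcu_nocbs=1,3-7 offloads callback processing for CPU 1 and CPUs 3 through 7; cpulist_parse() accepts the usual comma-separated list of CPU ranges.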
|
|
|
|
|
rcu: Make rcu_nocb_poll an early_param instead of module_param
The as-documented rcu_nocb_poll will fail to enable this feature
for two reasons. (1) there is an extra "s" in the documented
name which is not in the code, and (2) since it uses module_param,
it really is expecting a prefix, akin to "rcutree.fanout_leaf"
and the prefix isn't documented.
However, there are several reasons why we might not want to
simply fix the typo and add the prefix:
1) we'd end up with rcutree.rcu_nocb_poll, and rather probably make
a change to rcutree.nocb_poll
2) if we did #1, then the prefix wouldn't be consistent with the
rcu_nocbs=<cpumap> parameter (i.e. one with, one without prefix)
3) the use of module_param in a header file is less than desired,
since it isn't immediately obvious that it will get processed
via rcutree.c and get the prefix from that (although use of
module_param_named() could clarify that.)
4) the implied export of /sys/module/rcutree/parameters/rcu_nocb_poll
data to userspace via module_param() doesn't really buy us anything,
as it is read-only and we can tell if it is enabled already without
it, since there is a printk at early boot telling us so.
In light of all that, just change it from a module_param() to an
early_param() call, and worry about adding it to /sys later on if
we decide to allow a dynamic setting of it.
Also change the variable to be tagged as read_mostly, since it
will only ever be fiddled with at most once at boot.
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
2012-12-21 05:19:22 +08:00
|
|
|
static int __init parse_rcu_nocb_poll(char *arg)
|
|
|
|
{
|
|
|
|
rcu_nocb_poll = 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
|
|
|
|
|
2013-02-11 12:48:58 +08:00
|
|
|
/*
|
2012-12-31 07:21:01 +08:00
|
|
|
* Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
|
|
|
|
* grace period.
|
2013-02-11 12:48:58 +08:00
|
|
|
*/
|
2012-12-31 07:21:01 +08:00
|
|
|
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
|
2013-02-11 12:48:58 +08:00
|
|
|
{
|
2012-12-31 07:21:01 +08:00
|
|
|
wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
|
2013-02-11 12:48:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2012-12-31 05:06:35 +08:00
|
|
|
* Set the root rcu_node structure's ->need_future_gp field
|
2013-02-11 12:48:58 +08:00
|
|
|
* based on the sum of those of all rcu_node structures. This does
|
|
|
|
* double-count the root rcu_node structure's requests, but this
|
|
|
|
* is necessary to handle the possibility of a rcu_nocb_kthread()
|
|
|
|
* having awakened during the time that the rcu_node structures
|
|
|
|
* were being updated for the end of the previous grace period.
|
2013-01-08 05:37:42 +08:00
|
|
|
*/
|
2013-02-11 12:48:58 +08:00
|
|
|
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
|
|
|
|
{
|
2012-12-31 05:06:35 +08:00
|
|
|
rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
|
2013-02-11 12:48:58 +08:00
|
|
|
}
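As a worked example (numbers hypothetical): while grace period 8 is in flight, rnp->completed == 7, so requests for the next period accumulate in need_future_gp[(7 + 1) & 0x1] == need_future_gp[0]; when that period ends and rnp->completed becomes 8, rcu_nocb_gp_cleanup() wakes nocb_gp_wq[8 & 0x1] == nocb_gp_wq[0], the queue on which those waiters slept.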
|
|
|
|
|
|
|
|
static void rcu_init_one_nocb(struct rcu_node *rnp)
|
2013-01-08 05:37:42 +08:00
|
|
|
{
|
2013-02-11 12:48:58 +08:00
|
|
|
init_waitqueue_head(&rnp->nocb_gp_wq[0]);
|
|
|
|
init_waitqueue_head(&rnp->nocb_gp_wq[1]);
|
2013-01-08 05:37:42 +08:00
|
|
|
}
|
|
|
|
|
2013-11-18 10:25:48 +08:00
|
|
|
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
2014-02-24 22:18:09 +08:00
|
|
|
/* Is the specified CPU a no-CBs CPU? */
|
2013-03-27 06:47:24 +08:00
|
|
|
bool rcu_is_nocb_cpu(int cpu)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
|
|
|
if (have_rcu_nocb_mask)
|
|
|
|
return cpumask_test_cpu(cpu, rcu_nocb_mask);
|
|
|
|
return false;
|
|
|
|
}
|
2013-11-18 10:25:48 +08:00
|
|
|
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
2012-08-20 12:35:53 +08:00
|
|
|
|
rcu: Parallelize and economize NOCB kthread wakeups
An 80-CPU system with a context-switch-heavy workload can require so
many NOCB kthread wakeups that the RCU grace-period kthreads spend several
tens of percent of a CPU just awakening things. This clearly will not
scale well: If you add enough CPUs, the RCU grace-period kthreads would
get behind, increasing grace-period latency.
To avoid this problem, this commit divides the NOCB kthreads into leaders
and followers, where the grace-period kthreads awaken the leaders each of
whom in turn awakens its followers. By default, the number of groups of
kthreads is the square root of the number of CPUs, but this default may
be overridden using the rcutree.rcu_nocb_leader_stride boot parameter.
This reduces the number of wakeups done per grace period by the RCU
grace-period kthread by the square root of the number of CPUs, but of
course by shifting those wakeups to the leaders. In addition, because
the leaders do grace periods on behalf of their respective followers,
the number of wakeups of the followers decreases by up to a factor of two.
Instead of being awakened once when new callbacks arrive and again
at the end of the grace period, the followers are awakened only at
the end of the grace period.
For a numerical example, in a 4096-CPU system, the grace-period kthread
would awaken 64 leaders, each of which would awaken its 63 followers
at the end of the grace period. This compares favorably with the 79
wakeups for the grace-period kthread on an 80-CPU system.
Reported-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
2014-06-25 00:26:11 +08:00
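A sketch of the default leader-group sizing described above (function name and shape are assumptions; the real rcuo organization code also handles remainders and the boot-parameter plumbing):

#include <linux/kernel.h>	/* int_sqrt() */

static int rcu_nocb_leader_stride_sketch(int nr_cpus, int requested)
{
	int ls = requested;

	if (ls == -1)			/* no rcutree.rcu_nocb_leader_stride= given */
		ls = int_sqrt(nr_cpus);	/* e.g. 4096 CPUs -> 64 leaders of 64 */
	return ls;
}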
/*
 * Kick the leader kthread for this NOCB group.
 */
static void wake_nocb_leader(struct rcu_data *rdp, bool force)
{
	struct rcu_data *rdp_leader = rdp->nocb_leader;

	if (!READ_ONCE(rdp_leader->nocb_kthread))
		return;
	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
		wake_up(&rdp_leader->nocb_wq);
	}
}
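For orientation, a hedged sketch of the other half of the hierarchy: once awakened, a leader walks its follower list. The ->nocb_next_follower linkage is assumed here as the counterpart of the ->nocb_leader pointer used above; the real leader loop also adopts callbacks, sleeps, and emits tracepoints.

/*
 * Sketch only: how a leader might wake its followers at the end of a
 * grace period, assuming a ->nocb_next_follower link paired with the
 * ->nocb_leader pointer used above.
 */
static void sketch_wake_followers(struct rcu_data *my_rdp)
{
	struct rcu_data *rdp;

	for (rdp = my_rdp->nocb_next_follower; rdp; rdp = rdp->nocb_next_follower)
		wake_up(&rdp->nocb_wq);	/* Followers invoke their own CBs. */
}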
rcu: Make rcu_barrier() understand about missing rcuo kthreads
Commit 35ce7f29a44a (rcu: Create rcuo kthreads only for onlined CPUs)
avoids creating rcuo kthreads for CPUs that never come online. This
fixes a bug in many instances of firmware: Instead of lying about their
age, these systems instead lie about the number of CPUs that they have.
Before commit 35ce7f29a44a, this could result in huge numbers of useless
rcuo kthreads being created.
Experience indicates that I should have told the people suffering
from this problem to fix their broken firmware, but I instead
produced what turned out to be a partial fix. The missing
piece supplied by this commit makes sure that rcu_barrier() knows not to
post callbacks for no-CBs CPUs that have not yet come online, because
otherwise rcu_barrier() will hang on systems having firmware that lies
about the number of CPUs.
It is tempting to simply have rcu_barrier() refuse to post a callback on
any no-CBs CPU that does not have an rcuo kthread. This unfortunately
does not work because rcu_barrier() is required to wait for all pending
callbacks. It is therefore required to wait even for those callbacks
that cannot possibly be invoked, even if doing so hangs the system.
Given that posting a callback to a no-CBs CPU that does not yet have an
rcuo kthread can hang rcu_barrier(), it is tempting to report an error
in this case. Unfortunately, this will result in false positives at
boot time, when it is perfectly legal to post callbacks to the boot CPU
before the scheduler has started, in other words, before it is legal
to invoke rcu_barrier().
So this commit instead has rcu_barrier() avoid posting callbacks to
CPUs having neither rcuo kthread nor pending callbacks, and has it
complain bitterly if it finds CPUs having no rcuo kthread but some
pending callbacks. And when rcu_barrier() does find CPUs having no rcuo
kthread but pending callbacks, as noted earlier, it has no choice but
to hang indefinitely.
Reported-by: Yanko Kaneti <yaneti@declera.com>
Reported-by: Jay Vosburgh <jay.vosburgh@canonical.com>
Reported-by: Meelis Roos <mroos@linux.ee>
Reported-by: Eric B Munson <emunson@akamai.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Eric B Munson <emunson@akamai.com>
Tested-by: Jay Vosburgh <jay.vosburgh@canonical.com>
Tested-by: Yanko Kaneti <yaneti@declera.com>
Tested-by: Kevin Fenzi <kevin@scrye.com>
Tested-by: Meelis Roos <mroos@linux.ee>
2014-10-28 00:15:54 +08:00
/*
 * Does the specified CPU need an RCU callback for the specified flavor
 * of rcu_barrier()?
 */
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	unsigned long ret;
#ifdef CONFIG_PROVE_RCU
	struct rcu_head *rhp;
#endif /* #ifdef CONFIG_PROVE_RCU */

	/*
	 * Check count of all no-CBs callbacks awaiting invocation.
	 * There needs to be a barrier before this function is called,
	 * but associated with a prior determination that no more
	 * callbacks would be posted. In the worst case, the first
	 * barrier in _rcu_barrier() suffices (but the caller cannot
	 * necessarily rely on this, so it is no substitute for the
	 * caller getting the concurrency design right!). There must
	 * also be a barrier between the following load and the posting
	 * of a callback (if a callback is in fact needed). This is
	 * associated with an atomic_inc() in the caller.
	 */
	ret = atomic_long_read(&rdp->nocb_q_count);

#ifdef CONFIG_PROVE_RCU
	rhp = READ_ONCE(rdp->nocb_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_gp_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_follower_head);

	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
	    rcu_scheduler_fully_active) {
		/* RCU callback enqueued before CPU first came online??? */
		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
		       cpu, rhp->func);
		WARN_ON_ONCE(1);
	}
#endif /* #ifdef CONFIG_PROVE_RCU */

	return !!ret;
}
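The ordering comment inside rcu_nocb_cpu_needs_barrier() refers to its caller; a minimal sketch of that caller-side pattern follows, borrowing the _rcu_barrier() names barrier_cpu_count and rcu_barrier_callback but omitting everything else that function does.

/*
 * Hedged sketch of the caller-side pairing, not the actual
 * _rcu_barrier(): skip CPUs that need no barrier callback, and let
 * the atomic_inc() order the count check before the posting.
 */
static void sketch_barrier_post(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

	if (!rcu_nocb_cpu_needs_barrier(rsp, cpu))
		return;				/* No kthread and no CBs: skip. */
	atomic_inc(&rsp->barrier_cpu_count);	/* Pairs with the load above. */
	call_rcu(&rdp->barrier_head, rcu_barrier_callback);
}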
/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
rcu: Break call_rcu() deadlock involving scheduler and perf
Dave Jones got the following lockdep splat:
> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.12.0-rc3+ #92 Not tainted
> -------------------------------------------------------
> trinity-child2/15191 is trying to acquire lock:
> (&rdp->nocb_wq){......}, at: [<ffffffff8108ff43>] __wake_up+0x23/0x50
>
> but task is already holding lock:
> (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> which lock already depends on the new lock.
>
>
> the existing dependency chain (in reverse order) is:
>
> -> #3 (&ctx->lock){-.-...}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff811500ff>] __perf_event_task_sched_out+0x2df/0x5e0
> [<ffffffff81091b83>] perf_event_task_sched_out+0x93/0xa0
> [<ffffffff81732052>] __schedule+0x1d2/0xa20
> [<ffffffff81732f30>] preempt_schedule_irq+0x50/0xb0
> [<ffffffff817352b6>] retint_kernel+0x26/0x30
> [<ffffffff813eed04>] tty_flip_buffer_push+0x34/0x50
> [<ffffffff813f0504>] pty_write+0x54/0x60
> [<ffffffff813e900d>] n_tty_write+0x32d/0x4e0
> [<ffffffff813e5838>] tty_write+0x158/0x2d0
> [<ffffffff811c4850>] vfs_write+0xc0/0x1f0
> [<ffffffff811c52cc>] SyS_write+0x4c/0xa0
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> -> #2 (&rq->lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff810980b2>] wake_up_new_task+0xc2/0x2e0
> [<ffffffff81054336>] do_fork+0x126/0x460
> [<ffffffff81054696>] kernel_thread+0x26/0x30
> [<ffffffff8171ff93>] rest_init+0x23/0x140
> [<ffffffff81ee1e4b>] start_kernel+0x3f6/0x403
> [<ffffffff81ee1571>] x86_64_start_reservations+0x2a/0x2c
> [<ffffffff81ee1664>] x86_64_start_kernel+0xf1/0xf4
>
> -> #1 (&p->pi_lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff810979d1>] try_to_wake_up+0x31/0x350
> [<ffffffff81097d62>] default_wake_function+0x12/0x20
> [<ffffffff81084af8>] autoremove_wake_function+0x18/0x40
> [<ffffffff8108ea38>] __wake_up_common+0x58/0x90
> [<ffffffff8108ff59>] __wake_up+0x39/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111b8d>] call_rcu+0x1d/0x20
> [<ffffffff81093697>] cpu_attach_domain+0x287/0x360
> [<ffffffff81099d7e>] build_sched_domains+0xe5e/0x10a0
> [<ffffffff81efa7fc>] sched_init_smp+0x3b7/0x47a
> [<ffffffff81ee1f4e>] kernel_init_freeable+0xf6/0x202
> [<ffffffff817200be>] kernel_init+0xe/0x190
> [<ffffffff8173d22c>] ret_from_fork+0x7c/0xb0
>
> -> #0 (&rdp->nocb_wq){......}:
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> other info that might help us debug this:
>
> Chain exists of:
> &rdp->nocb_wq --> &rq->lock --> &ctx->lock
>
> Possible unsafe locking scenario:
>
> CPU0 CPU1
> ---- ----
> lock(&ctx->lock);
> lock(&rq->lock);
> lock(&ctx->lock);
> lock(&rdp->nocb_wq);
>
> *** DEADLOCK ***
>
> 1 lock held by trinity-child2/15191:
> #0: (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> stack backtrace:
> CPU: 2 PID: 15191 Comm: trinity-child2 Not tainted 3.12.0-rc3+ #92
> ffffffff82565b70 ffff880070c2dbf8 ffffffff8172a363 ffffffff824edf40
> ffff880070c2dc38 ffffffff81726741 ffff880070c2dc90 ffff88022383b1c0
> ffff88022383aac0 0000000000000000 ffff88022383b188 ffff88022383b1c0
> Call Trace:
> [<ffffffff8172a363>] dump_stack+0x4e/0x82
> [<ffffffff81726741>] print_circular_bug+0x200/0x20f
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810c6439>] ? get_lock_stats+0x19/0x60
> [<ffffffff8100b2f4>] ? native_sched_clock+0x24/0x80
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff8109bc8f>] ? local_clock+0x3f/0x50
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff810c9af5>] ? trace_hardirqs_on_caller+0x115/0x1e0
> [<ffffffff810c9bcd>] ? trace_hardirqs_on+0xd/0x10
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
The underlying problem is that perf is invoking call_rcu() with the
scheduler locks held, but in NOCB mode, call_rcu() will, with high
probability, invoke the scheduler -- which just might want to use its
locks. The reason that call_rcu() needs to invoke the scheduler is
to wake up the corresponding rcuo callback-offload kthread, which
does the job of starting up a grace period and invoking the callbacks
afterwards.
One solution (championed on a related problem by Lai Jiangshan) is to
simply defer the wakeup to some point where scheduler locks are no longer
held. Since we don't want to unnecessarily incur the cost of such
deferral, the task before us is threefold:
1. Determine when it is likely that a relevant scheduler lock is held.
2. Defer the wakeup in such cases.
3. Ensure that all deferred wakeups eventually happen, preferably
sooner rather than later.
We use irqs_disabled_flags() as a proxy for relevant scheduler locks
being held. This works because the relevant locks are always acquired
with interrupts disabled. We may defer more often than needed, but that
is at least safe.
The wakeup deferral is tracked via a new field in the per-CPU and
per-RCU-flavor rcu_data structure, namely ->nocb_defer_wakeup.
This flag is checked by the RCU core processing. The __rcu_pending()
function now checks this flag, which causes rcu_check_callbacks()
to initiate RCU core processing at each scheduling-clock interrupt
where this flag is set. Of course this is not sufficient because
scheduling-clock interrupts are often turned off (the things we used to
be able to count on!). So the flags are also checked on entry to any
state that RCU considers to be idle, which includes both NO_HZ_IDLE idle
state and NO_HZ_FULL user-mode-execution state.
This approach should allow call_rcu() to be invoked regardless of what
locks you might be holding, the key word being "should".
Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
2013-10-05 05:33:34 +08:00
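Before the enqueue function itself, a minimal sketch of the deferral path this commit describes: the wakeup is recorded while interrupts are disabled and performed later from RCU core processing or on entry to idle. RCU_NOGP_WAKE_NOT is assumed as the "nothing deferred" level; the in-kernel helpers differ in detail (multiple wakeup levels, tracing).

/*
 * Sketch of the deferred-wakeup check, assuming RCU_NOGP_WAKE_NOT as
 * the "nothing deferred" level alongside the RCU_NOGP_WAKE level set
 * below; the real helpers also distinguish forced wakeups.
 */
static bool sketch_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return READ_ONCE(rdp->nocb_defer_wakeup) != RCU_NOGP_WAKE_NOT;
}

/* Called where scheduler locks cannot be held, e.g. from RCU core. */
static void sketch_do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	if (!sketch_nocb_need_deferred_wakeup(rdp))
		return;
	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
	wake_nocb_leader(rdp, false);	/* Now safe to enter the scheduler. */
}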
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy,
				    unsigned long flags)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	WRITE_ONCE(*old_rhpp, rhp);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */

	/* If we are not being polled and there is a kthread, awaken it ... */
	t = READ_ONCE(rdp->nocb_kthread);
	if (rcu_nocb_poll || !t) {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	len = atomic_long_read(&rdp->nocb_q_count);
	if (old_rhpp == &rdp->nocb_head) {
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			wake_nocb_leader(rdp, false);
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
|
rcu: Break call_rcu() deadlock involving scheduler and perf
Dave Jones got the following lockdep splat:
> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.12.0-rc3+ #92 Not tainted
> -------------------------------------------------------
> trinity-child2/15191 is trying to acquire lock:
> (&rdp->nocb_wq){......}, at: [<ffffffff8108ff43>] __wake_up+0x23/0x50
>
> but task is already holding lock:
> (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> which lock already depends on the new lock.
>
>
> the existing dependency chain (in reverse order) is:
>
> -> #3 (&ctx->lock){-.-...}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff811500ff>] __perf_event_task_sched_out+0x2df/0x5e0
> [<ffffffff81091b83>] perf_event_task_sched_out+0x93/0xa0
> [<ffffffff81732052>] __schedule+0x1d2/0xa20
> [<ffffffff81732f30>] preempt_schedule_irq+0x50/0xb0
> [<ffffffff817352b6>] retint_kernel+0x26/0x30
> [<ffffffff813eed04>] tty_flip_buffer_push+0x34/0x50
> [<ffffffff813f0504>] pty_write+0x54/0x60
> [<ffffffff813e900d>] n_tty_write+0x32d/0x4e0
> [<ffffffff813e5838>] tty_write+0x158/0x2d0
> [<ffffffff811c4850>] vfs_write+0xc0/0x1f0
> [<ffffffff811c52cc>] SyS_write+0x4c/0xa0
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> -> #2 (&rq->lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff810980b2>] wake_up_new_task+0xc2/0x2e0
> [<ffffffff81054336>] do_fork+0x126/0x460
> [<ffffffff81054696>] kernel_thread+0x26/0x30
> [<ffffffff8171ff93>] rest_init+0x23/0x140
> [<ffffffff81ee1e4b>] start_kernel+0x3f6/0x403
> [<ffffffff81ee1571>] x86_64_start_reservations+0x2a/0x2c
> [<ffffffff81ee1664>] x86_64_start_kernel+0xf1/0xf4
>
> -> #1 (&p->pi_lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff810979d1>] try_to_wake_up+0x31/0x350
> [<ffffffff81097d62>] default_wake_function+0x12/0x20
> [<ffffffff81084af8>] autoremove_wake_function+0x18/0x40
> [<ffffffff8108ea38>] __wake_up_common+0x58/0x90
> [<ffffffff8108ff59>] __wake_up+0x39/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111b8d>] call_rcu+0x1d/0x20
> [<ffffffff81093697>] cpu_attach_domain+0x287/0x360
> [<ffffffff81099d7e>] build_sched_domains+0xe5e/0x10a0
> [<ffffffff81efa7fc>] sched_init_smp+0x3b7/0x47a
> [<ffffffff81ee1f4e>] kernel_init_freeable+0xf6/0x202
> [<ffffffff817200be>] kernel_init+0xe/0x190
> [<ffffffff8173d22c>] ret_from_fork+0x7c/0xb0
>
> -> #0 (&rdp->nocb_wq){......}:
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> other info that might help us debug this:
>
> Chain exists of:
> &rdp->nocb_wq --> &rq->lock --> &ctx->lock
>
> Possible unsafe locking scenario:
>
> CPU0 CPU1
> ---- ----
> lock(&ctx->lock);
> lock(&rq->lock);
> lock(&ctx->lock);
> lock(&rdp->nocb_wq);
>
> *** DEADLOCK ***
>
> 1 lock held by trinity-child2/15191:
> #0: (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> stack backtrace:
> CPU: 2 PID: 15191 Comm: trinity-child2 Not tainted 3.12.0-rc3+ #92
> ffffffff82565b70 ffff880070c2dbf8 ffffffff8172a363 ffffffff824edf40
> ffff880070c2dc38 ffffffff81726741 ffff880070c2dc90 ffff88022383b1c0
> ffff88022383aac0 0000000000000000 ffff88022383b188 ffff88022383b1c0
> Call Trace:
> [<ffffffff8172a363>] dump_stack+0x4e/0x82
> [<ffffffff81726741>] print_circular_bug+0x200/0x20f
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810c6439>] ? get_lock_stats+0x19/0x60
> [<ffffffff8100b2f4>] ? native_sched_clock+0x24/0x80
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff8109bc8f>] ? local_clock+0x3f/0x50
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff810c9af5>] ? trace_hardirqs_on_caller+0x115/0x1e0
> [<ffffffff810c9bcd>] ? trace_hardirqs_on+0xd/0x10
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
The underlying problem is that perf is invoking call_rcu() with the
scheduler locks held, but in NOCB mode, call_rcu() will, with high
probability, invoke the scheduler -- which just might want to use its
locks. The reason that call_rcu() needs to invoke the scheduler is
to wake up the corresponding rcuo callback-offload kthread, which
does the job of starting up a grace period and invoking the callbacks
afterwards.
One solution (championed on a related problem by Lai Jiangshan) is to
simply defer the wakeup to some point where scheduler locks are no longer
held. Since we don't want to unnecessarily incur the cost of such
deferral, the task before us is threefold:
1. Determine when it is likely that a relevant scheduler lock is held.
2. Defer the wakeup in such cases.
3. Ensure that all deferred wakeups eventually happen, preferably
sooner rather than later.
We use irqs_disabled_flags() as a proxy for relevant scheduler locks
being held. This works because the relevant locks are always acquired
with interrupts disabled. We may defer more often than needed, but that
is at least safe.
The wakeup deferral is tracked via a new field in the per-CPU and
per-RCU-flavor rcu_data structure, namely ->nocb_defer_wakeup.
This flag is checked by the RCU core processing. The __rcu_pending()
function now checks this flag, which causes rcu_check_callbacks()
to initiate RCU core processing at each scheduling-clock interrupt
where this flag is set. Of course this is not sufficient because
scheduling-clock interrupts are often turned off (the things we used to
be able to count on!). So the flags are also checked on entry to any
state that RCU considers to be idle, which includes both NO_HZ_IDLE idle
state and NO_HZ_FULL user-mode-execution state.
This approach should allow call_rcu() to be invoked regardless of what
locks you might be holding, the key word being "should".
Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
2013-10-05 05:33:34 +08:00
|
|
|
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
|
|
|
|
TPS("WakeEmptyIsDeferred"));
|
|
|
|
}
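The deadlock-avoidance logic above reduces to a single wake-or-defer
decision. As a condensed sketch, not a substitute for the exact code in
this file, using the names that already appear here:

	/*
	 * Sketch: if interrupts are enabled, no scheduler lock can be
	 * held, so wake the rcuo kthread directly; otherwise record a
	 * deferred wakeup for RCU core processing to carry out once it
	 * is again safe to invoke the scheduler.
	 */
	static void nocb_wake_or_defer(struct rcu_data *rdp, unsigned long flags)
	{
		if (!irqs_disabled_flags(flags))
			wake_nocb_leader(rdp, false);	/* Safe to wake now. */
		else
			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;	/* Defer it. */
	}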
|
2012-08-20 12:35:53 +08:00
|
|
|
rdp->qlen_last_fqs_check = 0;
|
|
|
|
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
|
rcu: Parallelize and economize NOCB kthread wakeups
An 80-CPU system with a context-switch-heavy workload can require so
many NOCB kthread wakeups that the RCU grace-period kthreads spend several
tens of percent of a CPU just awakening things. This clearly will not
scale well: If you add enough CPUs, the RCU grace-period kthreads would
get behind, increasing grace-period latency.
To avoid this problem, this commit divides the NOCB kthreads into leaders
and followers: the grace-period kthreads awaken only the leaders, each of
whom in turn awakens its followers. By default, the number of groups of
kthreads is the square root of the number of CPUs, but this default may
be overridden using the rcutree.rcu_nocb_leader_stride boot parameter.
This reduces the number of wakeups done per grace period by the RCU
grace-period kthread by a factor of the square root of the number of
CPUs, though at the cost of shifting those wakeups to the leaders.
In addition, because
the leaders do grace periods on behalf of their respective followers,
the number of wakeups of the followers decreases by up to a factor of two.
Instead of being awakened once when new callbacks arrive and again
at the end of the grace period, the followers are awakened only at
the end of the grace period.
For a numerical example, in a 4096-CPU system, the grace-period kthread
would awaken 64 leaders, each of which would awaken its 63 followers
at the end of the grace period. This compares favorably with the 79
wakeups for the grace-period kthread on an 80-CPU system.
Reported-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
2014-06-25 00:26:11 +08:00
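To make the grouping concrete, here is a hedged sketch of how a CPU's
leader might be derived from the stride described above. int_sqrt() and
nr_cpu_ids are the kernel's integer square root and CPU count; the helper
itself is illustrative rather than the kernel's actual setup code:

	static int nocb_leader_cpu(int cpu)
	{
		int ls = rcu_nocb_leader_stride;

		if (ls <= 0)
			ls = int_sqrt(nr_cpu_ids);	/* Default: about sqrt(#CPUs) groups. */
		return cpu - (cpu % ls);		/* First CPU in the group leads. */
	}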
|
|
|
/* ... or if many callbacks queued. */
|
2014-07-30 05:50:47 +08:00
|
|
|
if (!irqs_disabled_flags(flags)) {
|
|
|
|
wake_nocb_leader(rdp, true);
|
|
|
|
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
|
|
|
|
TPS("WakeOvf"));
|
|
|
|
} else {
|
|
|
|
rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
|
|
|
|
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
|
|
|
|
TPS("WakeOvfIsDeferred"));
|
|
|
|
}
|
2012-08-20 12:35:53 +08:00
|
|
|
rdp->qlen_last_fqs_check = LONG_MAX / 2;
|
2013-08-15 07:24:26 +08:00
|
|
|
} else {
|
|
|
|
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
|
2012-08-20 12:35:53 +08:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a helper for __call_rcu(), which invokes this when the normal
|
|
|
|
* callback queue is inoperable. If this is not a no-CBs CPU, this
|
|
|
|
* function returns failure back to __call_rcu(), which can complain
|
|
|
|
* appropriately.
|
|
|
|
*
|
|
|
|
* Otherwise, this function queues the callback where the corresponding
|
|
|
|
* "rcuo" kthread can find it.
|
|
|
|
*/
|
|
|
|
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
|
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
bool lazy, unsigned long flags)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
|
|
|
|
2013-03-27 06:47:24 +08:00
|
|
|
if (!rcu_is_nocb_cpu(rdp->cpu))
|
2014-07-09 06:26:14 +08:00
|
|
|
return false;
|
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
|
2013-02-10 09:42:16 +08:00
|
|
|
if (__is_kfree_rcu_offset((unsigned long)rhp->func))
|
|
|
|
trace_rcu_kfree_callback(rdp->rsp->name, rhp,
|
|
|
|
(unsigned long)rhp->func,
|
2013-08-16 01:12:12 +08:00
|
|
|
-atomic_long_read(&rdp->nocb_q_count_lazy),
|
|
|
|
-atomic_long_read(&rdp->nocb_q_count));
|
2013-02-10 09:42:16 +08:00
|
|
|
else
|
|
|
|
trace_rcu_callback(rdp->rsp->name, rhp,
|
2013-08-16 01:12:12 +08:00
|
|
|
-atomic_long_read(&rdp->nocb_q_count_lazy),
|
|
|
|
-atomic_long_read(&rdp->nocb_q_count));
|
2014-08-13 02:27:31 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If called from an extended quiescent state with interrupts
|
|
|
|
* disabled, invoke the RCU core in order to allow the idle-entry
|
|
|
|
* deferred-wakeup check to function.
|
|
|
|
*/
|
|
|
|
if (irqs_disabled_flags(flags) &&
|
|
|
|
!rcu_is_watching() &&
|
|
|
|
cpu_online(smp_processor_id()))
|
|
|
|
invoke_rcu_core();
|
|
|
|
|
2014-07-09 06:26:14 +08:00
|
|
|
return true;
|
2012-08-20 12:35:53 +08:00
|
|
|
}
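A hedged sketch of the caller side described in the comment above:
__call_rcu() tries the no-CBs path first and falls back to the normal
per-CPU queue otherwise. The function name and the fallback helper below
are hypothetical stand-ins, not actual kernel code:

	static void example_call_rcu_tail(struct rcu_data *rdp, struct rcu_head *rhp,
					  bool lazy, unsigned long flags)
	{
		if (__call_rcu_nocb(rdp, rhp, lazy, flags))
			return;				/* Handed off to the rcuo kthread. */
		example_enqueue_normal(rdp, rhp);	/* Hypothetical non-offloaded path. */
	}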
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
|
|
|
|
* not a no-CBs CPU.
|
|
|
|
*/
|
|
|
|
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
|
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
struct rcu_data *rdp,
|
|
|
|
unsigned long flags)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
|
|
|
long ql = rsp->qlen;
|
|
|
|
long qll = rsp->qlen_lazy;
|
|
|
|
|
|
|
|
/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
|
2013-03-27 06:47:24 +08:00
|
|
|
if (!rcu_is_nocb_cpu(smp_processor_id()))
|
2014-07-09 06:26:15 +08:00
|
|
|
return false;
|
2012-08-20 12:35:53 +08:00
|
|
|
rsp->qlen = 0;
|
|
|
|
rsp->qlen_lazy = 0;
|
|
|
|
|
|
|
|
/* First, enqueue the donelist, if any. This preserves CB ordering. */
|
|
|
|
if (rsp->orphan_donelist != NULL) {
|
|
|
|
__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
|
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
rsp->orphan_donetail, ql, qll, flags);
|
2012-08-20 12:35:53 +08:00
|
|
|
ql = qll = 0;
|
|
|
|
rsp->orphan_donelist = NULL;
|
|
|
|
rsp->orphan_donetail = &rsp->orphan_donelist;
|
|
|
|
}
|
|
|
|
if (rsp->orphan_nxtlist != NULL) {
|
|
|
|
__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
|
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
rsp->orphan_nxttail, ql, qll, flags);
|
2012-08-20 12:35:53 +08:00
|
|
|
ql = qll = 0;
|
|
|
|
rsp->orphan_nxtlist = NULL;
|
|
|
|
rsp->orphan_nxttail = &rsp->orphan_nxtlist;
|
|
|
|
}
|
2014-07-09 06:26:15 +08:00
|
|
|
return true;
|
2012-08-20 12:35:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2013-01-08 05:37:42 +08:00
|
|
|
* If necessary, kick off a new grace period, and either way wait
|
|
|
|
* for a subsequent grace period to complete.
|
2012-08-20 12:35:53 +08:00
|
|
|
*/
|
2013-01-08 05:37:42 +08:00
|
|
|
static void rcu_nocb_wait_gp(struct rcu_data *rdp)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
2013-01-08 05:37:42 +08:00
|
|
|
unsigned long c;
|
2013-02-11 12:48:58 +08:00
|
|
|
bool d;
|
2013-01-08 05:37:42 +08:00
|
|
|
unsigned long flags;
|
rcu: Make callers awaken grace-period kthread
The rcu_start_gp_advanced() function currently uses irq_work_queue()
to defer wakeups of the RCU grace-period kthread. This deferring
is necessary to avoid RCU-scheduler deadlocks involving the rcu_node
structure's lock, meaning that RCU cannot call any of the scheduler's
wake-up functions while holding one of these locks.
Unfortunately, the second and subsequent calls to irq_work_queue() are
ignored, and the first call will be ignored (aside from queuing the work
item) if the scheduler-clock tick is turned off. This is OK for many
uses, especially those where irq_work_queue() is called from an interrupt
or softirq handler, because in those cases the scheduler-clock-tick state
will be re-evaluated, which will turn the scheduler-clock tick back on.
On the next tick, any deferred work will then be processed.
However, this strategy does not always work for RCU, which can be invoked
at process level from idle CPUs. In this case, the tick might never
be turned back on, indefinitely deferring a grace-period start request.
Note that the RCU CPU stall detector cannot see this condition, because
there is no RCU grace period in progress. Therefore, we can (and do!)
see long tens-of-seconds stalls in grace-period handling. In theory,
we could see a full grace-period hang, but rcutorture testing to date
has seen only the tens-of-seconds stalls. Event tracing demonstrates
that irq_work_queue() is being called repeatedly to no effect during
these stalls: The "newreq" event appears repeatedly from a task that is
not one of the grace-period kthreads.
In theory, irq_work_queue() might be fixed to avoid this sort of issue,
but RCU's requirements are unusual and it is quite straightforward to pass
wake-up responsibility up through RCU's call chain, so that the wakeup
happens when the offending locks are released.
This commit therefore makes this change. The rcu_start_gp_advanced(),
rcu_start_future_gp(), rcu_accelerate_cbs(), rcu_advance_cbs(),
__note_gp_changes(), and rcu_start_gp() functions now return a boolean
which indicates when a wake-up is needed. A new rcu_gp_kthread_wake()
does the wakeup when it is necessary and safe to do so: No self-wakes,
no wake-ups if the ->gp_flags field indicates there is no need (as in
someone else did the wake-up before we got around to it), and no wake-ups
before the grace-period kthread has been created.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
2014-03-12 04:02:16 +08:00
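The commit message above lists the conditions under which
rcu_gp_kthread_wake() actually performs a wakeup. A hedged sketch along
the lines described there (no self-wakes, no wakeup when ->gp_flags shows
nothing to do, and no wakeup before the kthread exists):

	static void rcu_gp_kthread_wake_sketch(struct rcu_state *rsp)
	{
		if (current == rsp->gp_kthread ||	/* No self-wakes... */
		    !READ_ONCE(rsp->gp_flags) ||	/* ...none when nothing is pending... */
		    !rsp->gp_kthread)			/* ...and none before the kthread exists. */
			return;
		wake_up(&rsp->gp_wq);
	}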
|
|
|
bool needwake;
|
2013-01-08 05:37:42 +08:00
|
|
|
struct rcu_node *rnp = rdp->mynode;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rnp->lock, flags);
|
2013-12-12 05:59:10 +08:00
|
|
|
smp_mb__after_unlock_lock();
|
rcu: Make callers awaken grace-period kthread
2014-03-12 04:02:16 +08:00
|
|
|
needwake = rcu_start_future_gp(rnp, rdp, &c);
|
2012-12-31 07:21:01 +08:00
|
|
|
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
rcu: Make callers awaken grace-period kthread
2014-03-12 04:02:16 +08:00
|
|
|
if (needwake)
|
|
|
|
rcu_gp_kthread_wake(rdp->rsp);
|
2012-08-20 12:35:53 +08:00
|
|
|
|
|
|
|
/*
|
2013-01-08 05:37:42 +08:00
|
|
|
* Wait for the grace period. Do so interruptibly to avoid messing
|
|
|
|
* up the load average.
|
2012-08-20 12:35:53 +08:00
|
|
|
*/
|
2013-07-13 05:18:47 +08:00
|
|
|
trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
|
2013-01-08 05:37:42 +08:00
|
|
|
for (;;) {
|
2013-02-11 12:48:58 +08:00
|
|
|
wait_event_interruptible(
|
|
|
|
rnp->nocb_gp_wq[c & 0x1],
|
2015-03-04 06:57:58 +08:00
|
|
|
(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
|
2013-02-11 12:48:58 +08:00
|
|
|
if (likely(d))
|
2013-01-08 05:37:42 +08:00
|
|
|
break;
|
2014-08-15 01:28:23 +08:00
|
|
|
WARN_ON(signal_pending(current));
|
2013-07-13 05:18:47 +08:00
|
|
|
trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
|
2013-01-08 05:37:42 +08:00
|
|
|
}
|
2013-07-13 05:18:47 +08:00
|
|
|
trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
|
2013-01-08 05:37:42 +08:00
|
|
|
smp_mb(); /* Ensure that CB invocation happens after GP end. */
|
2012-08-20 12:35:53 +08:00
|
|
|
}
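Two details of the wait loop above are worth noting. The wait-queue array
is indexed by "c & 0x1" because grace-period numbers alternate parity, so
two wait queues suffice to separate waiters on the current and the next
grace period. And the wakeup condition uses ULONG_CMP_GE() rather than a
plain ">=" so that the comparison stays correct when the unsigned
grace-period counters wrap; as I understand it, the macro is the usual
modular comparison:

	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))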
|
|
|
|
|
rcu: Parallelize and economize NOCB kthread wakeups
2014-06-25 00:26:11 +08:00
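A small illustrative helper for the grouping arithmetic just described;
the function name and its stand-alone form are assumptions for
illustration (the real leader/follower assignment lives in the
kthread-setup code, which is not part of this hunk), but int_sqrt() and
DIV_ROUND_UP() are standard kernel helpers.

static int rcu_nocb_group_count(int ncpus, int stride)
{
	if (stride <= 0)
		stride = int_sqrt(ncpus);	/* Default: sqrt(#CPUs) groups. */
	return DIV_ROUND_UP(ncpus, stride);	/* One leader per group. */
}

For example, with 4096 CPUs and the resulting default stride of 64, this
yields the 64-leader/63-follower layout quoted above.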
/*
 * Leaders come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_leader_wait(struct rcu_data *my_rdp)
{
	bool firsttime = true;
	bool gotcbs;
	struct rcu_data *rdp;
	struct rcu_head **tail;

wait_again:

	/* Wait for callbacks to appear. */
	if (!rcu_nocb_poll) {
		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
		wait_event_interruptible(my_rdp->nocb_wq,
				!READ_ONCE(my_rdp->nocb_leader_sleep));
		/* Memory barrier handled by smp_mb() calls below and repoll. */
	} else if (firsttime) {
		firsttime = false; /* Don't drown trace log with "Poll"! */
		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
	}

	/*
	 * Each pass through the following loop checks a follower for CBs.
	 * We are our own first follower.  Any CBs found are moved to
	 * nocb_gp_head, where they await a grace period.
	 */
	gotcbs = false;
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
		if (!rdp->nocb_gp_head)
			continue;  /* No CBs here, try next follower. */

		/* Move callbacks to wait-for-GP list, which is empty. */
		WRITE_ONCE(rdp->nocb_head, NULL);
		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
		gotcbs = true;
	}

	/*
	 * If there were no callbacks, sleep a bit, rescan after a
	 * memory barrier, and go retry.
	 */
	if (unlikely(!gotcbs)) {
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
					    "WokeEmpty");
		WARN_ON(signal_pending(current));
		schedule_timeout_interruptible(1);

		/* Rescan in case we were a victim of memory ordering. */
		my_rdp->nocb_leader_sleep = true;
		smp_mb();  /* Ensure _sleep true before scan. */
		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
			if (READ_ONCE(rdp->nocb_head)) {
				/* Found CB, so short-circuit next wait. */
				my_rdp->nocb_leader_sleep = false;
				break;
			}
		goto wait_again;
	}

	/* Wait for one grace period. */
	rcu_nocb_wait_gp(my_rdp);

	/*
	 * We left ->nocb_leader_sleep unset to reduce cache thrashing.
	 * We set it now, but recheck for new callbacks while
	 * traversing our follower list.
	 */
	my_rdp->nocb_leader_sleep = true;
	smp_mb();  /* Ensure _sleep true before scan of ->nocb_head. */

	/* Each pass through the following loop wakes a follower, if needed. */
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		if (READ_ONCE(rdp->nocb_head))
			my_rdp->nocb_leader_sleep = false; /* No need to sleep. */
		if (!rdp->nocb_gp_head)
			continue; /* No CBs, so no need to wake follower. */

		/* Append callbacks to follower's "done" list. */
		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
		*tail = rdp->nocb_gp_head;
		smp_mb__after_atomic(); /* Store *tail before wakeup. */
		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
			/*
			 * List was empty, wake up the follower.
			 * Memory barriers supplied by atomic_long_add().
			 */
			wake_up(&rdp->nocb_wq);
		}
	}

	/* If we (the leader) don't have CBs, go wait some more. */
	if (!my_rdp->nocb_follower_head)
		goto wait_again;
}
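A stand-alone model of the xchg()-based list handoff used twice above;
the struct and function names here are hypothetical, but the idea is the
one in the code: atomically swapping the tail pointer claims or extends
the list, and an old tail that still points at the list head reveals
that the destination list was empty, so a wakeup is in order.

struct cb_list {
	struct rcu_head *head;		/* First callback, or NULL. */
	struct rcu_head **tail;		/* Points at head when empty. */
};

/* Append a chain of callbacks; return true if the list was empty. */
static bool cb_list_append(struct cb_list *dst,
			   struct rcu_head *first, struct rcu_head **last)
{
	struct rcu_head **old_tail = xchg(&dst->tail, last);

	*old_tail = first;		/* Link new chain after old tail. */
	return old_tail == &dst->head;	/* Empty-list test, as above. */
}
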
/*
 * Followers come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_follower_wait(struct rcu_data *rdp)
{
	bool firsttime = true;

	for (;;) {
		if (!rcu_nocb_poll) {
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "FollowerSleep");
			wait_event_interruptible(rdp->nocb_wq,
					READ_ONCE(rdp->nocb_follower_head));
		} else if (firsttime) {
			/* Don't drown trace log with "Poll"! */
			firsttime = false;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
		}
		if (smp_load_acquire(&rdp->nocb_follower_head)) {
			/* ^^^ Ensure CB invocation follows _head test. */
			return;
		}
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "WokeEmpty");
		WARN_ON(signal_pending(current));
		schedule_timeout_interruptible(1);
	}
}
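A two-function model of the ordering contract assumed here, with
hypothetical names: in the real code the release side is the leader's
smp_mb__after_atomic() after storing *tail, while the follower's
smp_load_acquire() above guarantees that callback invocation cannot be
reordered before the head test.

static struct rcu_head *follower_head;	/* Stand-in for ->nocb_follower_head. */

static void publish_cbs(struct rcu_head *list)
{
	smp_store_release(&follower_head, list);  /* Release: list visible first. */
}

static struct rcu_head *fetch_cbs(void)
{
	return smp_load_acquire(&follower_head);  /* Acquire: pairs with release. */
}
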
/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
 * callbacks queued by the corresponding no-CBs CPU, however, there is
 * an optional leader-follower relationship so that the grace-period
 * kthreads don't have to do quite so many wakeups.
 */
static int rcu_nocb_kthread(void *arg)
{
	int c, cl;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_head **tail;
	struct rcu_data *rdp = arg;

	/* Each pass through this loop invokes one batch of callbacks */
	for (;;) {
		/* Wait for callbacks. */
		if (rdp->nocb_leader == rdp)
			nocb_leader_wait(rdp);
		else
			nocb_follower_wait(rdp);

		/* Pull the ready-to-invoke callbacks onto local list. */
		list = READ_ONCE(rdp->nocb_follower_head);
		BUG_ON(!list);
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
		WRITE_ONCE(rdp->nocb_follower_head, NULL);
		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);

		/* Each pass through the following loop invokes a callback. */
		trace_rcu_batch_start(rdp->rsp->name,
				      atomic_long_read(&rdp->nocb_q_count_lazy),
				      atomic_long_read(&rdp->nocb_q_count), -1);
		c = cl = 0;
		while (list) {
			next = list->next;
			/* Wait for enqueuing to complete, if needed. */
			while (next == NULL && &list->next != tail) {
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WaitQueue"));
				schedule_timeout_interruptible(1);
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WokeQueue"));
				next = list->next;
			}
			debug_rcu_head_unqueue(list);
			local_bh_disable();
			if (__rcu_reclaim(rdp->rsp->name, list))
				cl++;
			c++;
			local_bh_enable();
			list = next;
		}
		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
		smp_mb__before_atomic();  /* _add after CB invocation. */
		atomic_long_add(-c, &rdp->nocb_q_count);
		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
		rdp->n_nocbs_invoked += c;
	}
	return 0;
}

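The inner "wait for enqueuing" loop above tolerates a short window on the
enqueue side. Here is a minimal sketch of that producer, with a
hypothetical function name (the real enqueue path is not part of this
hunk): the tail is swapped before the old tail's ->next is filled in, so
a consumer can briefly observe a NULL ->next that is not yet the true
end of the list.

static void nocb_enqueue_sketch(struct rcu_data *rdp, struct rcu_head *rhp)
{
	struct rcu_head **old_tail;

	rhp->next = NULL;
	old_tail = xchg(&rdp->nocb_tail, &rhp->next); /* Step 1: claim tail. */
	/* Window: consumer sees *old_tail == NULL though tail has moved on. */
	WRITE_ONCE(*old_tail, rhp);                   /* Step 2: link in. */
}
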
rcu: Break call_rcu() deadlock involving scheduler and perf
Dave Jones got the following lockdep splat:
> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.12.0-rc3+ #92 Not tainted
> -------------------------------------------------------
> trinity-child2/15191 is trying to acquire lock:
> (&rdp->nocb_wq){......}, at: [<ffffffff8108ff43>] __wake_up+0x23/0x50
>
> but task is already holding lock:
> (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> which lock already depends on the new lock.
>
>
> the existing dependency chain (in reverse order) is:
>
> -> #3 (&ctx->lock){-.-...}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff811500ff>] __perf_event_task_sched_out+0x2df/0x5e0
> [<ffffffff81091b83>] perf_event_task_sched_out+0x93/0xa0
> [<ffffffff81732052>] __schedule+0x1d2/0xa20
> [<ffffffff81732f30>] preempt_schedule_irq+0x50/0xb0
> [<ffffffff817352b6>] retint_kernel+0x26/0x30
> [<ffffffff813eed04>] tty_flip_buffer_push+0x34/0x50
> [<ffffffff813f0504>] pty_write+0x54/0x60
> [<ffffffff813e900d>] n_tty_write+0x32d/0x4e0
> [<ffffffff813e5838>] tty_write+0x158/0x2d0
> [<ffffffff811c4850>] vfs_write+0xc0/0x1f0
> [<ffffffff811c52cc>] SyS_write+0x4c/0xa0
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> -> #2 (&rq->lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff810980b2>] wake_up_new_task+0xc2/0x2e0
> [<ffffffff81054336>] do_fork+0x126/0x460
> [<ffffffff81054696>] kernel_thread+0x26/0x30
> [<ffffffff8171ff93>] rest_init+0x23/0x140
> [<ffffffff81ee1e4b>] start_kernel+0x3f6/0x403
> [<ffffffff81ee1571>] x86_64_start_reservations+0x2a/0x2c
> [<ffffffff81ee1664>] x86_64_start_kernel+0xf1/0xf4
>
> -> #1 (&p->pi_lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff810979d1>] try_to_wake_up+0x31/0x350
> [<ffffffff81097d62>] default_wake_function+0x12/0x20
> [<ffffffff81084af8>] autoremove_wake_function+0x18/0x40
> [<ffffffff8108ea38>] __wake_up_common+0x58/0x90
> [<ffffffff8108ff59>] __wake_up+0x39/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111b8d>] call_rcu+0x1d/0x20
> [<ffffffff81093697>] cpu_attach_domain+0x287/0x360
> [<ffffffff81099d7e>] build_sched_domains+0xe5e/0x10a0
> [<ffffffff81efa7fc>] sched_init_smp+0x3b7/0x47a
> [<ffffffff81ee1f4e>] kernel_init_freeable+0xf6/0x202
> [<ffffffff817200be>] kernel_init+0xe/0x190
> [<ffffffff8173d22c>] ret_from_fork+0x7c/0xb0
>
> -> #0 (&rdp->nocb_wq){......}:
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> other info that might help us debug this:
>
> Chain exists of:
> &rdp->nocb_wq --> &rq->lock --> &ctx->lock
>
> Possible unsafe locking scenario:
>
> CPU0 CPU1
> ---- ----
> lock(&ctx->lock);
> lock(&rq->lock);
> lock(&ctx->lock);
> lock(&rdp->nocb_wq);
>
> *** DEADLOCK ***
>
> 1 lock held by trinity-child2/15191:
> #0: (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> stack backtrace:
> CPU: 2 PID: 15191 Comm: trinity-child2 Not tainted 3.12.0-rc3+ #92
> ffffffff82565b70 ffff880070c2dbf8 ffffffff8172a363 ffffffff824edf40
> ffff880070c2dc38 ffffffff81726741 ffff880070c2dc90 ffff88022383b1c0
> ffff88022383aac0 0000000000000000 ffff88022383b188 ffff88022383b1c0
> Call Trace:
> [<ffffffff8172a363>] dump_stack+0x4e/0x82
> [<ffffffff81726741>] print_circular_bug+0x200/0x20f
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810c6439>] ? get_lock_stats+0x19/0x60
> [<ffffffff8100b2f4>] ? native_sched_clock+0x24/0x80
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff8109bc8f>] ? local_clock+0x3f/0x50
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff810c9af5>] ? trace_hardirqs_on_caller+0x115/0x1e0
> [<ffffffff810c9bcd>] ? trace_hardirqs_on+0xd/0x10
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
The underlying problem is that perf is invoking call_rcu() with the
scheduler locks held, but in NOCB mode, call_rcu() will with high
probability invoke the scheduler -- which just might want to use its
locks. The reason that call_rcu() needs to invoke the scheduler is
to wake up the corresponding rcuo callback-offload kthread, which
does the job of starting up a grace period and invoking the callbacks
afterwards.
One solution (championed on a related problem by Lai Jiangshan) is to
simply defer the wakeup to some point where scheduler locks are no longer
held. Since we don't want to unnecessarily incur the cost of such
deferral, the task before us is threefold:
1. Determine when it is likely that a relevant scheduler lock is held.
2. Defer the wakeup in such cases.
3. Ensure that all deferred wakeups eventually happen, preferably
sooner rather than later.
We use irqs_disabled_flags() as a proxy for relevant scheduler locks
being held. This works because the relevant locks are always acquired
with interrupts disabled. We may defer more often than needed, but that
is at least safe.
The wakeup deferral is tracked via a new field in the per-CPU and
per-RCU-flavor rcu_data structure, namely ->nocb_defer_wakeup.
This flag is checked by the RCU core processing. The __rcu_pending()
function now checks this flag, which causes rcu_check_callbacks()
to initiate RCU core processing at each scheduling-clock interrupt
where this flag is set. Of course this is not sufficient because
scheduling-clock interrupts are often turned off (the things we used to
be able to count on!). So the flags are also checked on entry to any
state that RCU considers to be idle, which includes both NO_HZ_IDLE idle
state and NO_HZ_FULL user-mode-execution state.
This approach should allow call_rcu() to be invoked regardless of what
locks you might be holding, the key word being "should".
Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
2013-10-05 05:33:34 +08:00
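A hedged sketch of the deferral decision the commit message describes;
the function name is hypothetical, and the flag is shown as a simple
boolean even though the field later carries more than one wakeup type
(note the int ndw below). irqs_disabled_flags() is the proxy named above
for "a relevant scheduler lock may be held".

static void nocb_wake_or_defer(struct rcu_data *rdp,
			       struct rcu_data *rdp_leader,
			       unsigned long flags)
{
	if (irqs_disabled_flags(flags)) {
		/* Scheduler locks may be held: defer to a safe point. */
		WRITE_ONCE(rdp->nocb_defer_wakeup, true);
	} else {
		wake_up(&rdp_leader->nocb_wq);	/* Safe to wake immediately. */
	}
}
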
|
|
|
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
|
2014-07-30 05:50:47 +08:00
|
|
|
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
|
rcu: Break call_rcu() deadlock involving scheduler and perf
Dave Jones got the following lockdep splat:
> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.12.0-rc3+ #92 Not tainted
> -------------------------------------------------------
> trinity-child2/15191 is trying to acquire lock:
> (&rdp->nocb_wq){......}, at: [<ffffffff8108ff43>] __wake_up+0x23/0x50
>
> but task is already holding lock:
> (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> which lock already depends on the new lock.
>
>
> the existing dependency chain (in reverse order) is:
>
> -> #3 (&ctx->lock){-.-...}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff811500ff>] __perf_event_task_sched_out+0x2df/0x5e0
> [<ffffffff81091b83>] perf_event_task_sched_out+0x93/0xa0
> [<ffffffff81732052>] __schedule+0x1d2/0xa20
> [<ffffffff81732f30>] preempt_schedule_irq+0x50/0xb0
> [<ffffffff817352b6>] retint_kernel+0x26/0x30
> [<ffffffff813eed04>] tty_flip_buffer_push+0x34/0x50
> [<ffffffff813f0504>] pty_write+0x54/0x60
> [<ffffffff813e900d>] n_tty_write+0x32d/0x4e0
> [<ffffffff813e5838>] tty_write+0x158/0x2d0
> [<ffffffff811c4850>] vfs_write+0xc0/0x1f0
> [<ffffffff811c52cc>] SyS_write+0x4c/0xa0
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> -> #2 (&rq->lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff810980b2>] wake_up_new_task+0xc2/0x2e0
> [<ffffffff81054336>] do_fork+0x126/0x460
> [<ffffffff81054696>] kernel_thread+0x26/0x30
> [<ffffffff8171ff93>] rest_init+0x23/0x140
> [<ffffffff81ee1e4b>] start_kernel+0x3f6/0x403
> [<ffffffff81ee1571>] x86_64_start_reservations+0x2a/0x2c
> [<ffffffff81ee1664>] x86_64_start_kernel+0xf1/0xf4
>
> -> #1 (&p->pi_lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff810979d1>] try_to_wake_up+0x31/0x350
> [<ffffffff81097d62>] default_wake_function+0x12/0x20
> [<ffffffff81084af8>] autoremove_wake_function+0x18/0x40
> [<ffffffff8108ea38>] __wake_up_common+0x58/0x90
> [<ffffffff8108ff59>] __wake_up+0x39/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111b8d>] call_rcu+0x1d/0x20
> [<ffffffff81093697>] cpu_attach_domain+0x287/0x360
> [<ffffffff81099d7e>] build_sched_domains+0xe5e/0x10a0
> [<ffffffff81efa7fc>] sched_init_smp+0x3b7/0x47a
> [<ffffffff81ee1f4e>] kernel_init_freeable+0xf6/0x202
> [<ffffffff817200be>] kernel_init+0xe/0x190
> [<ffffffff8173d22c>] ret_from_fork+0x7c/0xb0
>
> -> #0 (&rdp->nocb_wq){......}:
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> other info that might help us debug this:
>
> Chain exists of:
> &rdp->nocb_wq --> &rq->lock --> &ctx->lock
>
> Possible unsafe locking scenario:
>
> CPU0 CPU1
> ---- ----
> lock(&ctx->lock);
> lock(&rq->lock);
> lock(&ctx->lock);
> lock(&rdp->nocb_wq);
>
> *** DEADLOCK ***
>
> 1 lock held by trinity-child2/15191:
> #0: (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> stack backtrace:
> CPU: 2 PID: 15191 Comm: trinity-child2 Not tainted 3.12.0-rc3+ #92
> ffffffff82565b70 ffff880070c2dbf8 ffffffff8172a363 ffffffff824edf40
> ffff880070c2dc38 ffffffff81726741 ffff880070c2dc90 ffff88022383b1c0
> ffff88022383aac0 0000000000000000 ffff88022383b188 ffff88022383b1c0
> Call Trace:
> [<ffffffff8172a363>] dump_stack+0x4e/0x82
> [<ffffffff81726741>] print_circular_bug+0x200/0x20f
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810c6439>] ? get_lock_stats+0x19/0x60
> [<ffffffff8100b2f4>] ? native_sched_clock+0x24/0x80
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff8109bc8f>] ? local_clock+0x3f/0x50
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff810c9af5>] ? trace_hardirqs_on_caller+0x115/0x1e0
> [<ffffffff810c9bcd>] ? trace_hardirqs_on+0xd/0x10
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
The underlying problem is that perf is invoking call_rcu() with the
scheduler locks held, but in NOCB mode, call_rcu() will with high
probability invoke the scheduler -- which just might want to use its
locks. The reason that call_rcu() needs to invoke the scheduler is
to wake up the corresponding rcuo callback-offload kthread, which
does the job of starting up a grace period and invoking the callbacks
afterwards.
One solution (championed on a related problem by Lai Jiangshan) is to
simply defer the wakeup to some point where scheduler locks are no longer
held. Since we don't want to unnecessarily incur the cost of such
deferral, the task before us is threefold:
1. Determine when it is likely that a relevant scheduler lock is held.
2. Defer the wakeup in such cases.
3. Ensure that all deferred wakeups eventually happen, preferably
sooner rather than later.
We use irqs_disabled_flags() as a proxy for relevant scheduler locks
being held. This works because the relevant locks are always acquired
with interrupts disabled. We may defer more often than needed, but that
is at least safe.
The wakeup deferral is tracked via a new field in the per-CPU and
per-RCU-flavor rcu_data structure, namely ->nocb_defer_wakeup.
This flag is checked by the RCU core processing. The __rcu_pending()
function now checks this flag, which causes rcu_check_callbacks()
to initiate RCU core processing at each scheduling-clock interrupt
where this flag is set. Of course this is not sufficient because
scheduling-clock interrupts are often turned off (the things we used to
be able to count on!). So the flags are also checked on entry to any
state that RCU considers to be idle, which includes both NO_HZ_IDLE idle
state and NO_HZ_FULL user-mode-execution state.
This approach should allow call_rcu() to be invoked regardless of what
locks you might be holding, the key word being "should".
Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
2013-10-05 05:33:34 +08:00
|
|
|
{
|
2015-03-04 06:57:58 +08:00
|
|
|
return READ_ONCE(rdp->nocb_defer_wakeup);
|
rcu: Break call_rcu() deadlock involving scheduler and perf
Dave Jones got the following lockdep splat:
> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.12.0-rc3+ #92 Not tainted
> -------------------------------------------------------
> trinity-child2/15191 is trying to acquire lock:
> (&rdp->nocb_wq){......}, at: [<ffffffff8108ff43>] __wake_up+0x23/0x50
>
> but task is already holding lock:
> (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> which lock already depends on the new lock.
>
>
> the existing dependency chain (in reverse order) is:
>
> -> #3 (&ctx->lock){-.-...}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff811500ff>] __perf_event_task_sched_out+0x2df/0x5e0
> [<ffffffff81091b83>] perf_event_task_sched_out+0x93/0xa0
> [<ffffffff81732052>] __schedule+0x1d2/0xa20
> [<ffffffff81732f30>] preempt_schedule_irq+0x50/0xb0
> [<ffffffff817352b6>] retint_kernel+0x26/0x30
> [<ffffffff813eed04>] tty_flip_buffer_push+0x34/0x50
> [<ffffffff813f0504>] pty_write+0x54/0x60
> [<ffffffff813e900d>] n_tty_write+0x32d/0x4e0
> [<ffffffff813e5838>] tty_write+0x158/0x2d0
> [<ffffffff811c4850>] vfs_write+0xc0/0x1f0
> [<ffffffff811c52cc>] SyS_write+0x4c/0xa0
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> -> #2 (&rq->lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
> [<ffffffff810980b2>] wake_up_new_task+0xc2/0x2e0
> [<ffffffff81054336>] do_fork+0x126/0x460
> [<ffffffff81054696>] kernel_thread+0x26/0x30
> [<ffffffff8171ff93>] rest_init+0x23/0x140
> [<ffffffff81ee1e4b>] start_kernel+0x3f6/0x403
> [<ffffffff81ee1571>] x86_64_start_reservations+0x2a/0x2c
> [<ffffffff81ee1664>] x86_64_start_kernel+0xf1/0xf4
>
> -> #1 (&p->pi_lock){-.-.-.}:
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff810979d1>] try_to_wake_up+0x31/0x350
> [<ffffffff81097d62>] default_wake_function+0x12/0x20
> [<ffffffff81084af8>] autoremove_wake_function+0x18/0x40
> [<ffffffff8108ea38>] __wake_up_common+0x58/0x90
> [<ffffffff8108ff59>] __wake_up+0x39/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111b8d>] call_rcu+0x1d/0x20
> [<ffffffff81093697>] cpu_attach_domain+0x287/0x360
> [<ffffffff81099d7e>] build_sched_domains+0xe5e/0x10a0
> [<ffffffff81efa7fc>] sched_init_smp+0x3b7/0x47a
> [<ffffffff81ee1f4e>] kernel_init_freeable+0xf6/0x202
> [<ffffffff817200be>] kernel_init+0xe/0x190
> [<ffffffff8173d22c>] ret_from_fork+0x7c/0xb0
>
> -> #0 (&rdp->nocb_wq){......}:
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> other info that might help us debug this:
>
> Chain exists of:
> &rdp->nocb_wq --> &rq->lock --> &ctx->lock
>
> Possible unsafe locking scenario:
>
> CPU0 CPU1
> ---- ----
> lock(&ctx->lock);
> lock(&rq->lock);
> lock(&ctx->lock);
> lock(&rdp->nocb_wq);
>
> *** DEADLOCK ***
>
> 1 lock held by trinity-child2/15191:
> #0: (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> stack backtrace:
> CPU: 2 PID: 15191 Comm: trinity-child2 Not tainted 3.12.0-rc3+ #92
> ffffffff82565b70 ffff880070c2dbf8 ffffffff8172a363 ffffffff824edf40
> ffff880070c2dc38 ffffffff81726741 ffff880070c2dc90 ffff88022383b1c0
> ffff88022383aac0 0000000000000000 ffff88022383b188 ffff88022383b1c0
> Call Trace:
> [<ffffffff8172a363>] dump_stack+0x4e/0x82
> [<ffffffff81726741>] print_circular_bug+0x200/0x20f
> [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
> [<ffffffff810c6439>] ? get_lock_stats+0x19/0x60
> [<ffffffff8100b2f4>] ? native_sched_clock+0x24/0x80
> [<ffffffff810cc243>] lock_acquire+0x93/0x200
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
> [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
> [<ffffffff8108ff43>] __wake_up+0x23/0x50
> [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
> [<ffffffff81111450>] __call_rcu+0x140/0x820
> [<ffffffff8109bc8f>] ? local_clock+0x3f/0x50
> [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
> [<ffffffff81149abf>] put_ctx+0x4f/0x70
> [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
> [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
> [<ffffffff810c9af5>] ? trace_hardirqs_on_caller+0x115/0x1e0
> [<ffffffff810c9bcd>] ? trace_hardirqs_on+0xd/0x10
> [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
> [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
> [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
The underlying problem is that perf is invoking call_rcu() with the
scheduler locks held, but in NOCB mode, call_rcu() will with high
probability invoke the scheduler -- which just might want to use its
locks. The reason that call_rcu() needs to invoke the scheduler is
to wake up the corresponding rcuo callback-offload kthread, which
does the job of starting up a grace period and invoking the callbacks
afterwards.
One solution (championed on a related problem by Lai Jiangshan) is to
simply defer the wakeup to some point where scheduler locks are no longer
held. Since we don't want to unnecessarily incur the cost of such
deferral, the task before us is threefold:
1. Determine when it is likely that a relevant scheduler lock is held.
2. Defer the wakeup in such cases.
3. Ensure that all deferred wakeups eventually happen, preferably
sooner rather than later.
We use irqs_disabled_flags() as a proxy for relevant scheduler locks
being held. This works because the relevant locks are always acquired
with interrupts disabled. We may defer more often than needed, but that
is at least safe.
The wakeup deferral is tracked via a new field in the per-CPU and
per-RCU-flavor rcu_data structure, namely ->nocb_defer_wakeup.
This flag is checked by the RCU core processing. The __rcu_pending()
function now checks this flag, which causes rcu_check_callbacks()
to initiate RCU core processing at each scheduling-clock interrupt
where this flag is set. Of course this is not sufficient because
scheduling-clock interrupts are often turned off (the things we used to
be able to count on!). So the flags are also checked on entry to any
state that RCU considers to be idle, which includes both NO_HZ_IDLE idle
state and NO_HZ_FULL user-mode-execution state.
This approach should allow call_rcu() to be invoked regardless of what
locks you might be holding, the key word being "should".
Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
2013-10-05 05:33:34 +08:00
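To make the deferral concrete, here is a minimal sketch of the enqueue-side decision; the helper name and the intermediate RCU_NOGP_WAKE level are illustrative assumptions, not code from this patch:

static void nocb_enqueue_wakeup_sketch(struct rcu_data *rdp,
				       unsigned long flags)
{
	/* Waking the rcuo kthread can acquire scheduler locks... */
	if (irqs_disabled_flags(flags)) {
		/*
		 * ...so just record that a wakeup is owed; RCU core
		 * processing or idle entry performs it later.
		 */
		WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE);
		return;
	}
	wake_nocb_leader(rdp, false); /* Safe to wake immediately. */
}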
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	int ndw;

	if (!rcu_nocb_need_deferred_wakeup(rdp))
		return;
	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
}
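For reference, the rcu_nocb_need_deferred_wakeup() predicate used above does not appear in this hunk. A minimal sketch, assuming RCU_NOGP_WAKE_NOT is the zero "no wakeup owed" value, might read:

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	/* Any value other than RCU_NOGP_WAKE_NOT means a wakeup is owed. */
	return READ_ONCE(rdp->nocb_defer_wakeup);
}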

void __init rcu_init_nohz(void)
{
	int cpu;
	bool need_rcu_nocb_mask = true;
	struct rcu_state *rsp;

#ifdef CONFIG_RCU_NOCB_CPU_NONE
	need_rcu_nocb_mask = false;
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
		need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
			return;
		}
		have_rcu_nocb_mask = true;
	}
	if (!have_rcu_nocb_mask)
		return;

#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running)
		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
		cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_rcu_flavor(rsp) {
		for_each_cpu(cpu, rcu_nocb_mask)
			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
		rcu_organize_nocb_kthreads(rsp);
	}
}
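As a usage illustration (hypothetical command line, not part of this change), booting a CONFIG_RCU_NOCB_CPU kernel with:

	rcu_nocbs=1-7 rcu_nocb_poll

offloads callbacks from CPUs 1-7 and enables polling, exercising the 'rcu_nocbs=' sanity check and the rcu_nocb_poll test above and producing the corresponding pr_info() console lines.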

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	rdp->nocb_tail = &rdp->nocb_head;
	init_waitqueue_head(&rdp->nocb_wq);
rcu: Parallelize and economize NOCB kthread wakeups
An 80-CPU system with a context-switch-heavy workload can require so
many NOCB kthread wakeups that the RCU grace-period kthreads spend several
tens of percent of a CPU just awakening things. This clearly will not
scale well: If you add enough CPUs, the RCU grace-period kthreads would
get behind, increasing grace-period latency.
To avoid this problem, this commit divides the NOCB kthreads into leaders
and followers, where the grace-period kthreads awaken the leaders, each of
which in turn awakens its followers. By default, the number of groups of
kthreads is the square root of the number of CPUs, but this default may
be overridden using the rcutree.rcu_nocb_leader_stride boot parameter.
This reduces the number of wakeups done per grace period by the RCU
grace-period kthread by the square root of the number of CPUs, but of
course by shifting those wakeups to the leaders. In addition, because
the leaders do grace periods on behalf of their respective followers,
the number of wakeups of the followers decreases by up to a factor of two.
Instead of being awakened once when new callbacks arrive and again
at the end of the grace period, the followers are awakened only at
the end of the grace period.
For a numerical example, in a 4096-CPU system, the grace-period kthread
would awaken 64 leaders, each of which would awaken its 63 followers
at the end of the grace period. This compares favorably with the 79
wakeups for the grace-period kthread on an 80-CPU system.
Reported-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
2014-06-25 00:26:11 +08:00
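For instance, on the 80-CPU system described above, the default stride works out to int_sqrt(80) = 8, giving DIV_ROUND_UP(80, 8) = 10 leaders; the grace-period kthread then does 10 wakeups per grace period rather than 79.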
	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
 * brought online out of order, this can require re-organizing the
 * leader-follower relationships.
 */
static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp;
	struct rcu_data *rdp_last;
	struct rcu_data *rdp_old_leader;
	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
		return;

	/* If we didn't spawn the leader first, reorganize! */
	rdp_old_leader = rdp_spawn->nocb_leader;
	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
		rdp_last = NULL;
		rdp = rdp_old_leader;
		do {
			rdp->nocb_leader = rdp_spawn;
			if (rdp_last && rdp != rdp_spawn)
				rdp_last->nocb_next_follower = rdp;
			if (rdp == rdp_spawn) {
				rdp = rdp->nocb_next_follower;
			} else {
				rdp_last = rdp;
				rdp = rdp->nocb_next_follower;
				rdp_last->nocb_next_follower = NULL;
			}
		} while (rdp);
		rdp_spawn->nocb_next_follower = rdp_old_leader;
	}

	/* Spawn the kthread for this CPU and RCU flavor. */
	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
			"rcuo%c/%d", rsp->abbr, cpu);
	BUG_ON(IS_ERR(t));
	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
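Suppose, for illustration, that CPU 4 is the designated leader of followers 5-7 but CPU 6 is the first of the group to come online. The loop above then repoints CPUs 4, 5, 6, and 7 at CPU 6 as their leader, splices CPU 6 out of the follower chain, and finally links the old designated leader, CPU 4 (still followed by 5 and 7), in as CPU 6's first follower.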

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthreads, spawn them.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
	struct rcu_state *rsp;

	if (rcu_scheduler_fully_active)
		for_each_rcu_flavor(rsp)
			rcu_spawn_one_nocb_kthread(rsp, cpu);
}

/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_all_nocb_kthreads(cpu);
}

/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_leader_stride = -1;
module_param(rcu_nocb_leader_stride, int, 0444);

/*
 * Initialize leader-follower relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
{
	int cpu;
	int ls = rcu_nocb_leader_stride;
	int nl = 0;  /* Next leader. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
	struct rcu_data *rdp_prev = NULL;

	if (!have_rcu_nocb_mask)
		return;
	if (ls == -1) {
		ls = int_sqrt(nr_cpu_ids);
		rcu_nocb_leader_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure and
	 * spawns one rcu_nocb_kthread().
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rdp->cpu >= nl) {
			/* New leader, set up for followers & next leader. */
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_leader = rdp;
			rdp_leader = rdp;
		} else {
			/* Another follower, link to previous leader. */
			rdp->nocb_leader = rdp_leader;
			rdp_prev->nocb_next_follower = rdp;
		}
		rdp_prev = rdp;
	}
}
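Worked example (illustrative): with nr_cpu_ids = 16 and every CPU in rcu_nocb_mask, ls defaults to int_sqrt(16) = 4. The loop starts with nl = 0, so CPU 0 becomes a leader and nl advances to DIV_ROUND_UP(0 + 1, 4) * 4 = 4; CPUs 1-3 link in as its followers. CPU 4 then satisfies rdp->cpu >= nl and becomes the next leader, advancing nl to 8, and so on, yielding leaders at CPUs 0, 4, 8, and 12, each with three followers.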

/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;

	/* If there are early-boot callbacks, move them to nocb lists. */
	if (rdp->nxtlist) {
		rdp->nocb_head = rdp->nxtlist;
		rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
		atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
		atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
		rdp->nxtlist = NULL;
		rdp->qlen = 0;
		rdp->qlen_lazy = 0;
	}
	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
	return true;
}
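Leaving ->nxttail[RCU_NEXT_TAIL] NULL is the marker promised by the comment above: __call_rcu() can test this pointer to recognize a no-CBs CPU and hand the callback to the offload machinery rather than the normal per-CPU lists.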

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

rcu: Make rcu_barrier() understand about missing rcuo kthreads
Commit 35ce7f29a44a (rcu: Create rcuo kthreads only for onlined CPUs)
avoids creating rcuo kthreads for CPUs that never come online. This
fixes a bug in many instances of firmware: Instead of lying about their
age, these systems instead lie about the number of CPUs that they have.
Before commit 35ce7f29a44a, this could result in huge numbers of useless
rcuo kthreads being created.
It appears that experience indicates that I should have told the
people suffering from this problem to fix their broken firmware, but
I instead produced what turned out to be a partial fix. The missing
piece supplied by this commit makes sure that rcu_barrier() knows not to
post callbacks for no-CBs CPUs that have not yet come online, because
otherwise rcu_barrier() will hang on systems having firmware that lies
about the number of CPUs.
It is tempting to simply have rcu_barrier() refuse to post a callback on
any no-CBs CPU that does not have an rcuo kthread. This unfortunately
does not work because rcu_barrier() is required to wait for all pending
callbacks. It is therefore required to wait even for those callbacks
that cannot possibly be invoked, even if doing so hangs the system.
Given that posting a callback to a no-CBs CPU that does not yet have an
rcuo kthread can hang rcu_barrier(), it is tempting to report an error
in this case. Unfortunately, this will result in false positives at
boot time, when it is perfectly legal to post callbacks to the boot CPU
before the scheduler has started, in other words, before it is legal
to invoke rcu_barrier().
So this commit instead has rcu_barrier() avoid posting callbacks to
CPUs having neither rcuo kthread nor pending callbacks, and has it
complain bitterly if it finds CPUs having no rcuo kthread but some
pending callbacks. And when rcu_barrier() does find CPUs having no rcuo
kthread but pending callbacks, as noted earlier, it has no choice but
to hang indefinitely.
Reported-by: Yanko Kaneti <yaneti@declera.com>
Reported-by: Jay Vosburgh <jay.vosburgh@canonical.com>
Reported-by: Meelis Roos <mroos@linux.ee>
Reported-by: Eric B Munson <emunson@akamai.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Eric B Munson <emunson@akamai.com>
Tested-by: Jay Vosburgh <jay.vosburgh@canonical.com>
Tested-by: Yanko Kaneti <yaneti@declera.com>
Tested-by: Kevin Fenzi <kevin@scrye.com>
Tested-by: Meelis Roos <mroos@linux.ee>
2014-10-28 00:15:54 +08:00
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	WARN_ON_ONCE(1); /* Should be dead code. */
	return false;
}

static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	return false;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
struct rcu_data *rdp,
|
|
|
|
unsigned long flags)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
2014-07-09 06:26:17 +08:00
|
|
|
return false;
|
2012-08-20 12:35:53 +08:00
|
|
|
}
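/*
 * Hedged sketch (illustration only, not the mainline implementation and
 * not part of the !CONFIG_RCU_NOCB_CPU build): the deferred-wakeup policy
 * described in the call_rcu()-deadlock commit log quoted earlier in this
 * file.  The function name is invented; ->nocb_wq and ->nocb_defer_wakeup
 * are the fields that commit log names, with ->nocb_defer_wakeup treated
 * as a plain boolean flag here for simplicity.
 */
static inline void nocb_wake_or_defer_sketch(struct rcu_data *rdp,
					     unsigned long flags)
{
	if (irqs_disabled_flags(flags)) {
		/* Scheduler locks might be held: defer the wakeup. */
		WRITE_ONCE(rdp->nocb_defer_wakeup, true);
	} else {
		/* Safe to invoke the scheduler: wake the rcuo kthread now. */
		wake_up(&rdp->nocb_wq);
	}
}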
|
|
|
|
|
|
|
|
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2014-07-30 05:50:47 +08:00
|
|
|
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
|
rcu: Break call_rcu() deadlock involving scheduler and perf
2013-10-05 05:33:34 +08:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2014-07-12 02:30:24 +08:00
|
|
|
static void rcu_spawn_all_nocb_kthreads(int cpu)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __init rcu_spawn_nocb_kthreads(void)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2013-01-08 05:37:42 +08:00
|
|
|
static bool init_nocb_callback_list(struct rcu_data *rdp)
|
2012-08-20 12:35:53 +08:00
|
|
|
{
|
2013-01-08 05:37:42 +08:00
|
|
|
return false;
|
2012-08-20 12:35:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
|
2013-04-13 07:19:10 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* An adaptive-ticks CPU can potentially execute in kernel mode for an
|
|
|
|
* arbitrarily long period of time with the scheduling-clock tick turned
|
|
|
|
* off. RCU will be paying attention to this CPU because it is in the
|
|
|
|
* kernel, but the CPU cannot be guaranteed to be executing the RCU state
|
|
|
|
* machine because the scheduling-clock tick has been disabled. Therefore,
|
|
|
|
* if an adaptive-ticks CPU is failing to respond to the current grace
|
|
|
|
 * period and has not been idle from an RCU perspective, kick it.
|
|
|
|
*/
|
2014-06-21 07:49:01 +08:00
|
|
|
static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
|
2013-04-13 07:19:10 +08:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_NO_HZ_FULL
|
|
|
|
if (tick_nohz_full_cpu(cpu))
|
|
|
|
smp_send_reschedule(cpu);
|
|
|
|
#endif /* #ifdef CONFIG_NO_HZ_FULL */
|
|
|
|
}
|
nohz_full: Add rcu_dyntick data for scalable detection of all-idle state
This commit adds fields to the rcu_dyntick structure that are used to
detect idle CPUs. These new fields differ from the existing ones in
that the existing ones consider a CPU executing in user mode to be idle,
whereas the new ones consider CPUs executing in user mode to be busy.
The handling of these new fields is otherwise quite similar to that for
the existing fields. This commit also adds the initialization required
for these fields.
So, why is usermode execution treated differently, with RCU considering
it a quiescent state equivalent to idle, while in contrast the new
full-system idle state detection considers usermode execution to be
non-idle?
It turns out that although one of RCU's quiescent states is usermode
execution, it is not a full-system idle state. This is because the
purpose of the full-system idle state is not RCU, but rather determining
when accurate timekeeping can safely be disabled. Whenever accurate
timekeeping is required in a CONFIG_NO_HZ_FULL kernel, at least one
CPU must keep the scheduling-clock tick going. If even one CPU is
executing in user mode, accurate timekeeping is required, particularly for
architectures where gettimeofday() and friends do not enter the kernel.
Only when all CPUs are really and truly idle can accurate timekeeping be
disabled, allowing all CPUs to turn off the scheduling clock interrupt,
thus greatly improving energy efficiency.
This naturally raises the question "Why is this code in RCU rather than in
timekeeping?", and the answer is that RCU has the data and infrastructure
to efficiently make this determination.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
2013-06-22 03:34:33 +08:00
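/*
 * Hedged sketch of the per-CPU sysidle state added by the commit above.
 * The real fields live in struct rcu_dynticks; this fragment merely names
 * the ones used below, and the struct name itself is invented.
 */
struct rcu_dynticks_sysidle_sketch {
	long dynticks_idle_nesting;		/* irq/task nesting, sysidle view. */
	atomic_t dynticks_idle;			/* Even: CPU fully idle, odd: not. */
	unsigned long dynticks_idle_jiffies;	/* Jiffies of last sysidle entry. */
};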
|
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
|
|
|
|
|
2013-06-22 07:37:22 +08:00
|
|
|
static int full_sysidle_state; /* Current system-idle state. */
|
2013-06-22 05:51:40 +08:00
|
|
|
#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */
|
|
|
|
#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
|
|
|
|
#define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */
|
|
|
|
#define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */
|
|
|
|
#define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */
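/*
 * Reader aid (a summary of the code below, not new semantics):
 * rcu_sysidle() advances NOT -> SHORT -> LONG -> FULL while all
 * non-timekeeping CPUs remain idle, rcu_sys_is_idle() records
 * FULL -> FULL_NOTED, and any non-idle CPU knocks states past SHORT
 * back to NOT via rcu_sysidle_cancel() or rcu_sysidle_force_exit().
 */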
|
|
|
|
|
2013-06-22 04:00:57 +08:00
|
|
|
/*
|
|
|
|
* Invoked to note exit from irq or task transition to idle. Note that
|
|
|
|
* usermode execution does -not- count as idle here! After all, we want
|
|
|
|
* to detect full-system idle states, not RCU quiescent states and grace
|
|
|
|
* periods. The caller must have disabled interrupts.
|
|
|
|
*/
|
2014-09-03 05:13:44 +08:00
|
|
|
static void rcu_sysidle_enter(int irq)
|
2013-06-22 04:00:57 +08:00
|
|
|
{
|
|
|
|
unsigned long j;
|
2014-09-03 05:13:44 +08:00
|
|
|
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
2013-06-22 04:00:57 +08:00
|
|
|
|
2014-07-22 02:34:33 +08:00
|
|
|
/* If there are no nohz_full= CPUs, no need to track this. */
|
|
|
|
if (!tick_nohz_full_enabled())
|
|
|
|
return;
|
|
|
|
|
2013-06-22 04:00:57 +08:00
|
|
|
/* Adjust nesting, check for fully idle. */
|
|
|
|
if (irq) {
|
|
|
|
rdtp->dynticks_idle_nesting--;
|
|
|
|
WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
|
|
|
|
if (rdtp->dynticks_idle_nesting != 0)
|
|
|
|
return; /* Still not fully idle. */
|
|
|
|
} else {
|
|
|
|
if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
|
|
|
|
DYNTICK_TASK_NEST_VALUE) {
|
|
|
|
rdtp->dynticks_idle_nesting = 0;
|
|
|
|
} else {
|
|
|
|
rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
|
|
|
|
WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
|
|
|
|
return; /* Still not fully idle. */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Record start of fully idle period. */
|
|
|
|
j = jiffies;
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__before_atomic();
|
2013-06-22 04:00:57 +08:00
|
|
|
atomic_inc(&rdtp->dynticks_idle);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__after_atomic();
|
2013-06-22 04:00:57 +08:00
|
|
|
WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
|
|
|
|
}
|
|
|
|
|
2013-06-22 07:37:22 +08:00
|
|
|
/*
|
|
|
|
* Unconditionally force exit from full system-idle state. This is
|
|
|
|
* invoked when a normal CPU exits idle, but must be called separately
|
|
|
|
* for the timekeeping CPU (tick_do_timer_cpu). The reason for this
|
|
|
|
* is that the timekeeping CPU is permitted to take scheduling-clock
|
|
|
|
* interrupts while the system is in system-idle state, and of course
|
|
|
|
* rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
|
|
|
|
* interrupt from any other type of interrupt.
|
|
|
|
*/
|
|
|
|
void rcu_sysidle_force_exit(void)
|
|
|
|
{
|
2015-03-04 06:57:58 +08:00
|
|
|
int oldstate = READ_ONCE(full_sysidle_state);
|
2013-06-22 07:37:22 +08:00
|
|
|
int newoldstate;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Each pass through the following loop attempts to exit full
|
|
|
|
* system-idle state. If contention proves to be a problem,
|
|
|
|
* a trylock-based contention tree could be used here.
|
|
|
|
*/
|
|
|
|
while (oldstate > RCU_SYSIDLE_SHORT) {
|
|
|
|
newoldstate = cmpxchg(&full_sysidle_state,
|
|
|
|
oldstate, RCU_SYSIDLE_NOT);
|
|
|
|
if (oldstate == newoldstate &&
|
|
|
|
oldstate == RCU_SYSIDLE_FULL_NOTED) {
|
|
|
|
rcu_kick_nohz_cpu(tick_do_timer_cpu);
|
|
|
|
return; /* We cleared it, done! */
|
|
|
|
}
|
|
|
|
oldstate = newoldstate;
|
|
|
|
}
|
|
|
|
smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
|
|
|
|
}
|
|
|
|
|
2013-06-22 04:00:57 +08:00
|
|
|
/*
|
|
|
|
* Invoked to note entry to irq or task transition from idle. Note that
|
|
|
|
* usermode execution does -not- count as idle here! The caller must
|
|
|
|
* have disabled interrupts.
|
|
|
|
*/
|
2014-09-03 05:13:44 +08:00
|
|
|
static void rcu_sysidle_exit(int irq)
|
2013-06-22 04:00:57 +08:00
|
|
|
{
|
2014-09-03 05:13:44 +08:00
|
|
|
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
|
|
|
|
2014-07-22 02:34:33 +08:00
|
|
|
/* If there are no nohz_full= CPUs, no need to track this. */
|
|
|
|
if (!tick_nohz_full_enabled())
|
|
|
|
return;
|
|
|
|
|
2013-06-22 04:00:57 +08:00
|
|
|
/* Adjust nesting, check for already non-idle. */
|
|
|
|
if (irq) {
|
|
|
|
rdtp->dynticks_idle_nesting++;
|
|
|
|
WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
|
|
|
|
if (rdtp->dynticks_idle_nesting != 1)
|
|
|
|
return; /* Already non-idle. */
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Allow for irq misnesting. Yes, it really is possible
|
|
|
|
* to enter an irq handler then never leave it, and maybe
|
|
|
|
* also vice versa. Handle both possibilities.
|
|
|
|
*/
|
|
|
|
if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
|
|
|
|
rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
|
|
|
|
WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
|
|
|
|
return; /* Already non-idle. */
|
|
|
|
} else {
|
|
|
|
rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Record end of idle period. */
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__before_atomic();
|
2013-06-22 04:00:57 +08:00
|
|
|
atomic_inc(&rdtp->dynticks_idle);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__after_atomic();
|
2013-06-22 04:00:57 +08:00
|
|
|
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
|
2013-06-22 07:37:22 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are the timekeeping CPU, we are permitted to be non-idle
|
|
|
|
* during a system-idle state. This must be the case, because
|
|
|
|
* the timekeeping CPU has to take scheduling-clock interrupts
|
|
|
|
* during the time that the system is transitioning to full
|
|
|
|
* system-idle state. This means that the timekeeping CPU must
|
|
|
|
* invoke rcu_sysidle_force_exit() directly if it does anything
|
|
|
|
* more than take a scheduling-clock interrupt.
|
|
|
|
*/
|
|
|
|
if (smp_processor_id() == tick_do_timer_cpu)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Update system-idle state: We are clearly no longer fully idle! */
|
|
|
|
rcu_sysidle_force_exit();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see if the current CPU is idle. Note that usermode execution
|
2015-02-25 03:05:36 +08:00
|
|
|
* does not count as idle. The caller must have disabled interrupts,
|
|
|
|
* and must be running on tick_do_timer_cpu.
|
2013-06-22 07:37:22 +08:00
|
|
|
*/
|
|
|
|
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
|
|
|
|
unsigned long *maxj)
|
|
|
|
{
|
|
|
|
int cur;
|
|
|
|
unsigned long j;
|
|
|
|
struct rcu_dynticks *rdtp = rdp->dynticks;
|
|
|
|
|
2014-07-22 02:34:33 +08:00
|
|
|
/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
|
|
|
|
if (!tick_nohz_full_enabled())
|
|
|
|
return;
|
|
|
|
|
2013-06-22 07:37:22 +08:00
|
|
|
/*
|
|
|
|
* If some other CPU has already reported non-idle, if this is
|
|
|
|
* not the flavor of RCU that tracks sysidle state, or if this
|
|
|
|
 * is an offline CPU or the timekeeping CPU, there is nothing to do.
|
|
|
|
*/
|
2014-07-22 02:26:54 +08:00
|
|
|
if (!*isidle || rdp->rsp != rcu_state_p ||
|
2013-06-22 07:37:22 +08:00
|
|
|
cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
|
|
|
|
return;
|
2015-02-25 03:05:36 +08:00
|
|
|
/* Verify affinity of current kthread. */
|
|
|
|
WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
|
2013-06-22 07:37:22 +08:00
|
|
|
|
|
|
|
/* Pick up current idle and NMI-nesting counter and check. */
|
|
|
|
cur = atomic_read(&rdtp->dynticks_idle);
|
|
|
|
if (cur & 0x1) {
|
|
|
|
*isidle = false; /* We are not idle! */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
smp_mb(); /* Read counters before timestamps. */
|
|
|
|
|
|
|
|
/* Pick up timestamps. */
|
2015-03-04 06:57:58 +08:00
|
|
|
j = READ_ONCE(rdtp->dynticks_idle_jiffies);
|
2013-06-22 07:37:22 +08:00
|
|
|
/* If this CPU entered idle more recently, update maxj timestamp. */
|
|
|
|
if (ULONG_CMP_LT(*maxj, j))
|
|
|
|
*maxj = j;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Is this the flavor of RCU that is handling full-system idle?
|
|
|
|
*/
|
|
|
|
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
|
|
|
|
{
|
2014-07-22 02:26:54 +08:00
|
|
|
return rsp == rcu_state_p;
|
2013-06-22 07:37:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return a delay in jiffies based on the number of CPUs, rcu_node
|
|
|
|
* leaf fanout, and jiffies tick rate. The idea is to allow larger
|
|
|
|
* systems more time to transition to full-idle state in order to
|
|
|
|
 * avoid the cache thrashing that would otherwise occur on the state variable.
|
|
|
|
 * Really small systems (fewer than a couple of tens of CPUs) should
|
|
|
|
* instead use a single global atomically incremented counter, and later
|
|
|
|
* versions of this will automatically reconfigure themselves accordingly.
|
|
|
|
*/
|
|
|
|
static unsigned long rcu_sysidle_delay(void)
|
|
|
|
{
|
|
|
|
if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
|
|
|
|
return 0;
|
|
|
|
return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
|
|
|
|
}
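/*
 * Worked example (illustrative numbers, not a measured configuration):
 * with nr_cpu_ids = 4096, HZ = 1000, and rcu_fanout_leaf = 16, this
 * yields DIV_ROUND_UP(4096 * 1000, 16 * 1000) = 256 jiffies, so each
 * sysidle state advance waits roughly a quarter of a second.
 */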
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Advance the full-system-idle state. This is invoked when all of
|
|
|
|
* the non-timekeeping CPUs are idle.
|
|
|
|
*/
|
|
|
|
static void rcu_sysidle(unsigned long j)
|
|
|
|
{
|
|
|
|
/* Check the current state. */
|
2015-03-04 06:57:58 +08:00
|
|
|
switch (READ_ONCE(full_sysidle_state)) {
|
2013-06-22 07:37:22 +08:00
|
|
|
case RCU_SYSIDLE_NOT:
|
|
|
|
|
|
|
|
/* First time all are idle, so note a short idle period. */
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
|
2013-06-22 07:37:22 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case RCU_SYSIDLE_SHORT:
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Idle for a bit, time to advance to next state?
|
|
|
|
* cmpxchg failure means race with non-idle, let them win.
|
|
|
|
*/
|
|
|
|
if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
|
|
|
|
(void)cmpxchg(&full_sysidle_state,
|
|
|
|
RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case RCU_SYSIDLE_LONG:
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do an additional check pass before advancing to full.
|
|
|
|
* cmpxchg failure means race with non-idle, let them win.
|
|
|
|
*/
|
|
|
|
if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
|
|
|
|
(void)cmpxchg(&full_sysidle_state,
|
|
|
|
RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Found a non-idle non-timekeeping CPU, so kick the system-idle state
|
|
|
|
* back to the beginning.
|
|
|
|
*/
|
|
|
|
static void rcu_sysidle_cancel(void)
|
|
|
|
{
|
|
|
|
smp_mb();
|
2014-04-08 04:34:07 +08:00
|
|
|
if (full_sysidle_state > RCU_SYSIDLE_SHORT)
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
|
2013-06-22 07:37:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the sysidle state based on the results of a force-quiescent-state
|
|
|
|
* scan of the CPUs' dyntick-idle state.
|
|
|
|
*/
|
|
|
|
static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
|
|
|
|
unsigned long maxj, bool gpkt)
|
|
|
|
{
|
2014-07-22 02:26:54 +08:00
|
|
|
if (rsp != rcu_state_p)
|
2013-06-22 07:37:22 +08:00
|
|
|
return; /* Wrong flavor, ignore. */
|
|
|
|
if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
|
|
|
|
return; /* Running state machine from timekeeping CPU. */
|
|
|
|
if (isidle)
|
|
|
|
rcu_sysidle(maxj); /* More idle! */
|
|
|
|
else
|
|
|
|
rcu_sysidle_cancel(); /* Idle is over. */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wrapper for rcu_sysidle_report() when called from the grace-period
|
|
|
|
* kthread's context.
|
|
|
|
*/
|
|
|
|
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
|
|
|
|
unsigned long maxj)
|
|
|
|
{
|
2014-07-22 02:34:33 +08:00
|
|
|
/* If there are no nohz_full= CPUs, no need to track this. */
|
|
|
|
if (!tick_nohz_full_enabled())
|
|
|
|
return;
|
|
|
|
|
2013-06-22 07:37:22 +08:00
|
|
|
rcu_sysidle_report(rsp, isidle, maxj, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Callback and function for forcing an RCU grace period. */
|
|
|
|
struct rcu_sysidle_head {
|
|
|
|
struct rcu_head rh;
|
|
|
|
int inuse;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void rcu_sysidle_cb(struct rcu_head *rhp)
|
|
|
|
{
|
|
|
|
struct rcu_sysidle_head *rshp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The following memory barrier is needed to replace the
|
|
|
|
* memory barriers that would normally be in the memory
|
|
|
|
* allocator.
|
|
|
|
*/
|
|
|
|
smp_mb(); /* grace period precedes setting inuse. */
|
|
|
|
|
|
|
|
rshp = container_of(rhp, struct rcu_sysidle_head, rh);
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(rshp->inuse, 0);
|
2013-06-22 07:37:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see if the system is fully idle, other than the timekeeping CPU.
|
2014-07-22 02:34:33 +08:00
|
|
|
* The caller must have disabled interrupts. This is not intended to be
|
|
|
|
* called unless tick_nohz_full_enabled().
|
2013-06-22 07:37:22 +08:00
|
|
|
*/
|
|
|
|
bool rcu_sys_is_idle(void)
|
|
|
|
{
|
|
|
|
static struct rcu_sysidle_head rsh;
|
2015-03-04 06:57:58 +08:00
|
|
|
int rss = READ_ONCE(full_sysidle_state);
|
2013-06-22 07:37:22 +08:00
|
|
|
|
|
|
|
if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Handle small-system case by doing a full scan of CPUs. */
|
|
|
|
if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
|
|
|
|
int oldrss = rss - 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* One pass to advance to each state up to _FULL.
|
|
|
|
* Give up if any pass fails to advance the state.
|
|
|
|
*/
|
|
|
|
while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
|
|
|
|
int cpu;
|
|
|
|
bool isidle = true;
|
|
|
|
unsigned long maxj = jiffies - ULONG_MAX / 4;
|
|
|
|
struct rcu_data *rdp;
|
|
|
|
|
|
|
|
/* Scan all the CPUs looking for nonidle CPUs. */
|
|
|
|
for_each_possible_cpu(cpu) {
|
2014-07-22 02:26:54 +08:00
|
|
|
rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
|
2013-06-22 07:37:22 +08:00
|
|
|
rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
|
|
|
|
if (!isidle)
|
|
|
|
break;
|
|
|
|
}
|
2014-07-22 02:26:54 +08:00
|
|
|
rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
|
2013-06-22 07:37:22 +08:00
|
|
|
oldrss = rss;
|
2015-03-04 06:57:58 +08:00
|
|
|
rss = READ_ONCE(full_sysidle_state);
|
2013-06-22 07:37:22 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If this is the first observation of an idle period, record it. */
|
|
|
|
if (rss == RCU_SYSIDLE_FULL) {
|
|
|
|
rss = cmpxchg(&full_sysidle_state,
|
|
|
|
RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
|
|
|
|
return rss == RCU_SYSIDLE_FULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
smp_mb(); /* ensure rss load happens before later caller actions. */
|
|
|
|
|
|
|
|
/* If already fully idle, tell the caller (in case of races). */
|
|
|
|
if (rss == RCU_SYSIDLE_FULL_NOTED)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we aren't there yet, and a grace period is not in flight,
|
|
|
|
* initiate a grace period. Either way, tell the caller that
|
|
|
|
* we are not there yet. We use an xchg() rather than an assignment
|
|
|
|
* to make up for the memory barriers that would otherwise be
|
|
|
|
* provided by the memory allocator.
|
|
|
|
*/
|
|
|
|
if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
|
2014-07-22 02:26:54 +08:00
|
|
|
!rcu_gp_in_progress(rcu_state_p) &&
|
2013-06-22 07:37:22 +08:00
|
|
|
!rsh.inuse && xchg(&rsh.inuse, 1) == 0)
|
|
|
|
call_rcu(&rsh.rh, rcu_sysidle_cb);
|
|
|
|
return false;
|
2013-06-22 04:00:57 +08:00
|
|
|
}
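/*
 * Design note (reader aid): rsh above is a single static rcu_head that is
 * recycled via its ->inuse flag, so at most one sysidle-forcing grace
 * period is in flight at a time.  The xchg() here and the smp_mb() in
 * rcu_sysidle_cb() stand in for the ordering that dynamically allocating
 * and freeing the rcu_head would otherwise have provided.
 */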
|
|
|
|
|
nohz_full: Add rcu_dyntick data for scalable detection of all-idle state
2013-06-22 03:34:33 +08:00
|
|
|
/*
|
|
|
|
* Initialize dynticks sysidle state for CPUs coming online.
|
|
|
|
*/
|
|
|
|
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
|
|
|
|
{
|
|
|
|
rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
|
|
|
|
2014-09-03 05:13:44 +08:00
|
|
|
static void rcu_sysidle_enter(int irq)
|
2013-06-22 04:00:57 +08:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2014-09-03 05:13:44 +08:00
|
|
|
static void rcu_sysidle_exit(int irq)
|
2013-06-22 04:00:57 +08:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2013-06-22 07:37:22 +08:00
|
|
|
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
|
|
|
|
unsigned long *maxj)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
|
|
|
|
unsigned long maxj)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
nohz_full: Add rcu_dyntick data for scalable detection of all-idle state
2013-06-22 03:34:33 +08:00
|
|
|
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
2013-11-09 01:03:10 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
|
|
|
|
* grace-period kthread will do force_quiescent_state() processing?
|
|
|
|
* The idea is to avoid waking up RCU core processing on such a
|
|
|
|
* CPU unless the grace period has extended for too long.
|
|
|
|
*
|
|
|
|
* This code relies on the fact that all NO_HZ_FULL CPUs are also
|
2014-02-09 21:35:11 +08:00
|
|
|
* CONFIG_RCU_NOCB_CPU CPUs.
|
2013-11-09 01:03:10 +08:00
|
|
|
*/
|
|
|
|
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_NO_HZ_FULL
|
|
|
|
if (tick_nohz_full_cpu(smp_processor_id()) &&
|
|
|
|
(!rcu_gp_in_progress(rsp) ||
|
2015-03-04 06:57:58 +08:00
|
|
|
ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
|
2013-11-09 01:03:10 +08:00
|
|
|
return 1;
|
|
|
|
#endif /* #ifdef CONFIG_NO_HZ_FULL */
|
|
|
|
return 0;
|
|
|
|
}
|
2014-04-02 02:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Bind the grace-period kthread for the sysidle flavor of RCU to the
|
|
|
|
* timekeeping CPU.
|
|
|
|
*/
|
|
|
|
static void rcu_bind_gp_kthread(void)
|
|
|
|
{
|
2014-06-05 04:46:03 +08:00
|
|
|
int __maybe_unused cpu;
|
2014-04-02 02:20:36 +08:00
|
|
|
|
2014-06-05 04:46:03 +08:00
|
|
|
if (!tick_nohz_full_enabled())
|
2014-04-02 02:20:36 +08:00
|
|
|
return;
|
2014-06-05 04:46:03 +08:00
|
|
|
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
|
|
|
|
cpu = tick_do_timer_cpu;
|
2015-02-25 03:05:36 +08:00
|
|
|
if (cpu >= 0 && cpu < nr_cpu_ids)
|
2014-04-02 02:20:36 +08:00
|
|
|
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
2014-06-05 04:46:03 +08:00
|
|
|
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
2015-02-25 03:05:36 +08:00
|
|
|
housekeeping_affine(current);
|
2014-06-05 04:46:03 +08:00
|
|
|
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
2014-04-02 02:20:36 +08:00
|
|
|
}
|
2014-08-05 08:43:50 +08:00
|
|
|
|
|
|
|
/* Record the current task on dyntick-idle entry. */
|
|
|
|
static void rcu_dynticks_task_enter(void)
|
|
|
|
{
|
|
|
|
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
|
2014-08-05 08:43:50 +08:00
|
|
|
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Record no current task on dyntick-idle exit. */
|
|
|
|
static void rcu_dynticks_task_exit(void)
|
|
|
|
{
|
|
|
|
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
|
2014-08-05 08:43:50 +08:00
|
|
|
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
|
|
|
|
}
|