rcu/context_tracking: Move dynticks counter to context tracking
In order to prepare for merging the RCU dynticks counter into the context
tracking state, move the rcu_data's dynticks field to the context tracking
structure. It will later be mixed within the context tracking state itself.

[ paulmck: Move enum ctx_state into global scope. ]

Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
parent 3864caafe7
commit 62e2412df4
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -6,7 +6,15 @@
 #include <linux/static_key.h>
 #include <linux/context_tracking_irq.h>
 
+enum ctx_state {
+        CONTEXT_DISABLED = -1,  /* returned by ct_state() if unknown */
+        CONTEXT_KERNEL = 0,
+        CONTEXT_USER,
+        CONTEXT_GUEST,
+};
+
 struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
         /*
          * When active is false, probes are unset in order
          * to minimize overhead: TIF flags are cleared
@@ -15,17 +23,40 @@ struct context_tracking {
          */
         bool active;
         int recursion;
-        enum ctx_state {
-                CONTEXT_DISABLED = -1,  /* returned by ct_state() if unknown */
-                CONTEXT_KERNEL = 0,
-                CONTEXT_USER,
-                CONTEXT_GUEST,
-        } state;
+        enum ctx_state state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+        atomic_t dynticks;              /* Even value for idle, else odd. */
+#endif
 };
 
+#ifdef CONFIG_CONTEXT_TRACKING
+DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_dynticks(void)
+{
+        return atomic_read(this_cpu_ptr(&context_tracking.dynticks));
+}
+
+static __always_inline int ct_dynticks_cpu(int cpu)
+{
+        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+        return atomic_read(&ct->dynticks);
+}
+
+static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+{
+        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+        return atomic_read_acquire(&ct->dynticks);
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
 #ifdef CONFIG_CONTEXT_TRACKING_USER
 extern struct static_key_false context_tracking_key;
-DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
 static __always_inline bool context_tracking_enabled(void)
 {
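The convention the new accessors rely on is worth spelling out: ct->dynticks is even while the CPU is in an extended quiescent state (EQS, i.e. idle from RCU's point of view) and odd while RCU is watching. Below is a rough userspace model of that encoding, using C11 atomics in place of the kernel's atomic_t and per-CPU machinery; all names in it are illustrative, not kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* Stand-in for the per-CPU ct->dynticks counter. */
    static atomic_int dynticks[NR_CPUS];

    static int ct_dynticks_cpu(int cpu)     /* mirrors the new accessor */
    {
            return atomic_load(&dynticks[cpu]);
    }

    static bool cpu_in_eqs(int cpu)         /* even value <=> idle/EQS */
    {
            return !(ct_dynticks_cpu(cpu) & 0x1);
    }

    int main(void)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    atomic_init(&dynticks[cpu], 1); /* boot: odd, not idle */

            atomic_fetch_add(&dynticks[2], 1);      /* CPU 2 enters idle */
            printf("cpu 2 in EQS: %d\n", cpu_in_eqs(2));    /* prints 1 */
            return 0;
    }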
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -23,6 +23,13 @@
 #include <linux/kprobes.h>
 
 
+DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+        .dynticks = ATOMIC_INIT(1),
+#endif
+};
+EXPORT_SYMBOL_GPL(context_tracking);
+
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
 noinstr void ct_idle_enter(void)
 {
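Note the .dynticks = ATOMIC_INIT(1) initializer: as in the model above, every CPU boots with an odd counter, so RCU treats it as non-idle until it explicitly enters an extended quiescent state. This replaces the identical initializer removed from the rcu_data definition further down.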
@@ -138,9 +145,6 @@ noinstr void ct_nmi_exit(void)
 DEFINE_STATIC_KEY_FALSE(context_tracking_key);
 EXPORT_SYMBOL_GPL(context_tracking_key);
 
-DEFINE_PER_CPU(struct context_tracking, context_tracking);
-EXPORT_SYMBOL_GPL(context_tracking);
-
 static noinstr bool context_tracking_recursion_enter(void)
 {
         int recursion;
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -77,7 +77,6 @@
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
         .dynticks_nesting = 1,
         .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-        .dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_RCU_NOCB_CPU
         .cblist.flags = SEGCBLIST_RCU_CORE,
 #endif
@@ -268,7 +267,7 @@ void rcu_softirq_qs(void)
  */
 static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
 {
-        return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
+        return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.dynticks));
 }
 
 /*
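A side note on the arch_atomic_add_return() form: rcu_dynticks_inc() is noinstr code, and the raw arch_ atomics are the non-instrumented variants that are safe to call there. Sanitizer coverage is instead applied explicitly by the instrument_atomic_read()/instrument_atomic_write() calls visible in the hunks below, inside instrumentation_begin()/instrumentation_end() sections.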
@@ -324,9 +323,7 @@ static noinstr void rcu_dynticks_eqs_exit(void)
  */
 static void rcu_dynticks_eqs_online(void)
 {
-        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-
-        if (atomic_read(&rdp->dynticks) & 0x1)
+        if (ct_dynticks() & 0x1)
                 return;
         rcu_dynticks_inc(1);
 }
@@ -338,17 +335,17 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-        return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
+        return !(arch_atomic_read(this_cpu_ptr(&context_tracking.dynticks)) & 0x1);
 }
 
 /*
  * Snapshot the ->dynticks counter with full ordering so as to allow
  * stable comparison of this counter with past and future snapshots.
  */
-static int rcu_dynticks_snap(struct rcu_data *rdp)
+static int rcu_dynticks_snap(int cpu)
 {
         smp_mb();  // Fundamental RCU ordering guarantee.
-        return atomic_read_acquire(&rdp->dynticks);
+        return ct_dynticks_cpu_acquire(cpu);
 }
 
 /*
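The snapshot API is what grace-period detection builds on: take a fully ordered snapshot of a remote CPU's counter, then later decide whether that CPU is in an EQS (snapshot is even) or has passed through one (counter has moved on). Extending the userspace model from above; again illustrative, not kernel API, with C11 fences standing in for the kernel barriers:

    /* Like rcu_dynticks_snap(): fully ordered read of a remote counter. */
    static int dynticks_snap(int cpu)
    {
            atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
            return atomic_load_explicit(&dynticks[cpu], memory_order_acquire);
    }

    /* Like rcu_dynticks_in_eqs(): an even snapshot means the CPU was idle. */
    static bool in_eqs(int snap)
    {
            return !(snap & 0x1);
    }

    /* Like rcu_dynticks_in_eqs_since(): if the counter moved at all,
     * the CPU entered or left an EQS since the snapshot was taken. */
    static bool in_eqs_since(int cpu, int snap)
    {
            return snap != dynticks_snap(cpu);
    }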
@@ -363,9 +360,7 @@ static bool rcu_dynticks_in_eqs(int snap)
 /* Return true if the specified CPU is currently idle from an RCU viewpoint. */
 bool rcu_is_idle_cpu(int cpu)
 {
-        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-
-        return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
+        return rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
 }
 
 /*
@@ -375,7 +370,7 @@ bool rcu_is_idle_cpu(int cpu)
  */
 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
 {
-        return snap != rcu_dynticks_snap(rdp);
+        return snap != rcu_dynticks_snap(rdp->cpu);
 }
 
 /*
@@ -384,11 +379,10 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
  */
 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 {
-        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
         int snap;
 
         // If not quiescent, force back to earlier extended quiescent state.
-        snap = atomic_read(&rdp->dynticks) & ~0x1;
+        snap = ct_dynticks_cpu(cpu) & ~0x1;
 
         smp_rmb(); // Order ->dynticks and *vp reads.
         if (READ_ONCE(*vp))
@@ -396,7 +390,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
         smp_rmb(); // Order *vp read and ->dynticks re-read.
 
         // If still in the same extended quiescent state, we are good!
-        return snap == atomic_read(&rdp->dynticks);
+        return snap == ct_dynticks_cpu(cpu);
 }
 
 /*
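rcu_dynticks_zero_in_eqs() is the trickiest consumer: it wants to know that *vp read as zero while the remote CPU sat in one unbroken EQS. The read/barrier/read sandwich does that; clearing the low bit turns the first read into the surrounding (or most recent) EQS value, and re-reading the counter afterwards confirms no EQS boundary was crossed in between. Roughly, in the userspace model (illustrative only; the acquire fences are a loose stand-in for smp_rmb()):

    static bool zero_in_eqs(int cpu, int *vp)
    {
            /* Force even: the value the counter holds while in an EQS. */
            int snap = atomic_load(&dynticks[cpu]) & ~0x1;

            atomic_thread_fence(memory_order_acquire);      /* like smp_rmb() */
            if (*(volatile int *)vp)
                    return false;                           /* not zero, give up */
            atomic_thread_fence(memory_order_acquire);      /* like smp_rmb() */

            /* Unchanged (even) counter: the CPU never left that EQS. */
            return snap == atomic_load(&dynticks[cpu]);
    }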
@@ -620,6 +614,7 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 static noinstr void rcu_eqs_enter(bool user)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
 
         WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
         WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
@@ -633,12 +628,12 @@ static noinstr void rcu_eqs_enter(bool user)
 
         instrumentation_begin();
         lockdep_assert_irqs_disabled();
-        trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+        trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, ct_dynticks());
         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
         rcu_preempt_deferred_qs(current);
 
         // instrumentation for the noinstr rcu_dynticks_eqs_enter()
-        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+        instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
 
         instrumentation_end();
         WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
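From here on the pattern is mechanical: each EQS entry/exit path gains a struct context_tracking *ct local alongside the existing rdp, counter reads in tracepoints switch to ct_dynticks(), and the explicit sanitizer annotations retarget ct->dynticks. The nesting counters (dynticks_nesting, dynticks_nmi_nesting) stay in rcu_data at this point in the series.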
@@ -740,7 +735,7 @@ noinstr void rcu_user_enter(void)
  * rcu_nmi_exit - inform RCU of exit from NMI context
  *
  * If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
+ * RCU-idle period, update ct->dynticks and rdp->dynticks_nmi_nesting
  * to let the RCU grace-period handling know that the CPU is back to
  * being RCU-idle.
  *
@@ -749,6 +744,7 @@ noinstr void rcu_user_enter(void)
  */
 noinstr void rcu_nmi_exit(void)
 {
+        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
         instrumentation_begin();
@@ -766,7 +762,7 @@ noinstr void rcu_nmi_exit(void)
          */
         if (rdp->dynticks_nmi_nesting != 1) {
                 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
-                                  atomic_read(&rdp->dynticks));
+                                  ct_dynticks());
                 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
                            rdp->dynticks_nmi_nesting - 2);
                 instrumentation_end();
@@ -774,11 +770,11 @@ noinstr void rcu_nmi_exit(void)
         }
 
         /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
-        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
+        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, ct_dynticks());
         WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
         // instrumentation for the noinstr rcu_dynticks_eqs_enter()
-        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+        instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
         instrumentation_end();
 
         // RCU is watching here ...
@@ -817,6 +813,7 @@ void rcu_irq_exit_check_preempt(void)
  */
 static void noinstr rcu_eqs_exit(bool user)
 {
+        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
         struct rcu_data *rdp;
         long oldval;
 
@@ -836,9 +833,9 @@ static void noinstr rcu_eqs_exit(bool user)
         instrumentation_begin();
 
         // instrumentation for the noinstr rcu_dynticks_eqs_exit()
-        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+        instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
 
-        trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
+        trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, ct_dynticks());
         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
         WRITE_ONCE(rdp->dynticks_nesting, 1);
         WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
@@ -944,7 +941,7 @@ void __rcu_irq_enter_check_tick(void)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  *
- * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
+ * If the CPU was idle from RCU's viewpoint, update ct->dynticks and
  * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
  * that the CPU is active. This implementation permits nested NMIs, as
  * long as the nesting level does not overflow an int. (You will probably
@@ -957,6 +954,7 @@ noinstr void rcu_nmi_enter(void)
 {
         long incby = 2;
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
 
         /* Complain about underflow. */
         WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
@@ -980,9 +978,9 @@ noinstr void rcu_nmi_enter(void)
 
                 instrumentation_begin();
                 // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
-                instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
+                instrument_atomic_read(&ct->dynticks, sizeof(ct->dynticks));
                 // instrumentation for the noinstr rcu_dynticks_eqs_exit()
-                instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+                instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
 
                 incby = 1;
         } else if (!in_nmi()) {
@@ -994,7 +992,7 @@ noinstr void rcu_nmi_enter(void)
 
         trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                           rdp->dynticks_nmi_nesting,
-                          rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
+                          rdp->dynticks_nmi_nesting + incby, ct_dynticks());
         instrumentation_end();
         WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
                    rdp->dynticks_nmi_nesting + incby);
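The incby dance above is the NMI-nesting rule: the outermost NMI that interrupts an idle CPU first flips the dynticks counter odd (RCU starts watching) and then bumps dynticks_nmi_nesting by 1; every nested NMI bumps it by 2, so the nesting count stays odd for as long as the outermost handler runs. A compressed model of the rule, again on the userspace counters (illustrative only):

    static long nmi_nesting[NR_CPUS];

    static void nmi_enter_model(int cpu)
    {
            long incby = 2;                         /* nested NMI: parity kept */

            if (cpu_in_eqs(cpu)) {                  /* NMI interrupted idle */
                    atomic_fetch_add(&dynticks[cpu], 1);    /* odd: watching */
                    incby = 1;
            }
            nmi_nesting[cpu] += incby;
    }

    static void nmi_exit_model(int cpu)
    {
            if (nmi_nesting[cpu] != 1) {            /* still nested */
                    nmi_nesting[cpu] -= 2;
                    return;
            }
            nmi_nesting[cpu] = 0;                   /* outermost exit */
            atomic_fetch_add(&dynticks[cpu], 1);    /* even: back to idle */
    }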
@@ -1138,7 +1136,7 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-        rdp->dynticks_snap = rcu_dynticks_snap(rdp);
+        rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
         if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                 rcu_gpnum_ovf(rdp->mynode, rdp);
@@ -4142,7 +4140,7 @@ rcu_boot_init_percpu_data(int cpu)
         rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
         INIT_WORK(&rdp->strict_work, strict_work_handler);
         WARN_ON_ONCE(rdp->dynticks_nesting != 1);
-        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
+        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
         rdp->barrier_seq_snap = rcu_state.barrier_sequence;
         rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
         rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -189,7 +189,6 @@ struct rcu_data {
         int dynticks_snap;              /* Per-GP tracking for dynticks. */
         long dynticks_nesting;          /* Track process nesting level. */
         long dynticks_nmi_nesting;      /* Track irq/NMI nesting level. */
-        atomic_t dynticks;              /* Even value for idle, else odd. */
         bool rcu_need_heavy_qs;         /* GP old, so heavy quiescent state! */
         bool rcu_urgent_qs;             /* GP old need light quiescent state. */
         bool rcu_forced_tick;           /* Forced tick to provide QS. */
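The dynticks_snap field that remains here is the per-grace-period snapshot filled in by dyntick_save_progress_counter() above; only the live counter moves in this patch, so code that reads rcu_data continues to find its cached snapshots where they always were.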
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -356,7 +356,7 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
                     !(rnp->qsmaskinitnext & mask)) {
                         mask_ofl_test |= mask;
                 } else {
-                        snap = rcu_dynticks_snap(rdp);
+                        snap = rcu_dynticks_snap(cpu);
                         if (rcu_dynticks_in_eqs(snap))
                                 mask_ofl_test |= mask;
                         else
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -465,7 +465,7 @@ static void print_cpu_stall_info(int cpu)
         }
         delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
         falsepositive = rcu_is_gp_kthread_starving(NULL) &&
-                        rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
+                        rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
         rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
         if (rcuc_starved)
                 sprintf(buf, " rcuc=%ld jiffies(starved)", j);
@@ -478,7 +478,7 @@ static void print_cpu_stall_info(int cpu)
                rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                         "!."[!delta],
                ticks_value, ticks_title,
-               rcu_dynticks_snap(rdp) & 0xfff,
+               rcu_dynticks_snap(cpu) & 0xfff,
                rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
                rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
                data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,