rcu: Define rcu_irq_{enter,exit}() in terms of rcu_nmi_{enter,exit}()
RCU currently uses two different mechanisms for tracking irqs and NMIs. This is unnecessary complexity: Given that NMIs can nest and given that RCU's tracking handles such nesting, the NMI tracking mechanism can also be used to track irqs. This commit therefore defines rcu_irq_enter() in terms of rcu_nmi_enter() and rcu_irq_exit() in terms of rcu_nmi_exit(). Unfortunately, callers must still distinguish between the irq and NMI functions because additional actions are taken when an irq interrupts idle or nohz_full usermode execution, and these actions cannot always be taken from NMI handlers.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 6136d6e48a
commit 58721f5da4
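To make the idea in the commit message concrete, here is a small standalone model, for illustration only (not kernel code and not part of this commit): a single nesting counter that already copes with nested NMIs can track irqs as well, with the irq wrappers adding their extra work around the shared counter updates. All model_*() names are hypothetical; the real logic is rcu_nmi_enter(), rcu_nmi_exit(), and the rcu_irq_*() wrappers shown in the diff below.

/*
 * Standalone userspace model of the single-counter approach.
 * The model_*() names are hypothetical and exist only for this sketch.
 */
#include <assert.h>
#include <stdio.h>

static int nmi_nesting;			/* plays the role of ->dynticks_nmi_nesting */

static void model_nmi_enter(void)	/* shared counter update, irq or NMI alike */
{
	nmi_nesting++;
}

static void model_nmi_exit(void)
{
	assert(nmi_nesting > 0);	/* exits must balance enters */
	nmi_nesting--;
}

static void model_irq_enter(void)
{
	/* irq-only actions (idle/nohz_full handling) would go here... */
	model_nmi_enter();		/* ...then delegate to the shared NMI path */
}

static void model_irq_exit(void)
{
	model_nmi_exit();
	/* ...followed by irq-only actions not always legal in NMI context */
}

int main(void)
{
	model_irq_enter();		/* irq arrives */
	model_nmi_enter();		/* NMI nests inside the irq */
	model_nmi_exit();
	model_irq_exit();
	printf("nesting after balanced enters/exits: %d\n", nmi_nesting);
	return 0;
}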
@@ -266,6 +266,7 @@ void rcu_bh_qs(void)
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
 
@@ -914,8 +915,8 @@ void rcu_nmi_exit(void)
  *
  * This code assumes that the idle loop never does anything that might
  * result in unbalanced calls to irq_enter() and irq_exit(). If your
- * architecture violates this assumption, RCU will give you what you
- * deserve, good and hard. But very infrequently and irreproducibly.
+ * architecture's idle loop violates this assumption, RCU will give you what
+ * you deserve, good and hard. But very infrequently and irreproducibly.
  *
  * Use things like work queues to work around this limitation.
  *
@@ -926,23 +927,14 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_exit(void)
 {
-	struct rcu_dynticks *rdtp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	lockdep_assert_irqs_disabled();
-	rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	/* Page faults can happen in NMI handlers, so check... */
-	if (rdtp->dynticks_nmi_nesting)
-		return;
-
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 1);
-	if (rdtp->dynticks_nesting <= 1) {
-		rcu_eqs_enter_common(true);
-	} else {
-		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
-		rdtp->dynticks_nesting--;
-	}
+	if (rdtp->dynticks_nmi_nesting == 1)
+		rcu_prepare_for_idle();
+	rcu_nmi_exit();
+	if (rdtp->dynticks_nmi_nesting == 0)
+		rcu_dynticks_task_enter();
 }
 
 /*
@@ -1097,12 +1089,12 @@ void rcu_nmi_enter(void)
  * sections can occur. The caller must have disabled interrupts.
  *
  * Note that the Linux kernel is fully capable of entering an interrupt
- * handler that it never exits, for example when doing upcalls to
- * user mode! This code assumes that the idle loop never does upcalls to
- * user mode. If your architecture does do upcalls from the idle loop (or
- * does anything else that results in unbalanced calls to the irq_enter()
- * and irq_exit() functions), RCU will give you what you deserve, good
- * and hard. But very infrequently and irreproducibly.
+ * handler that it never exits, for example when doing upcalls to user mode!
+ * This code assumes that the idle loop never does upcalls to user mode.
+ * If your architecture's idle loop does do upcalls to user mode (or does
+ * anything else that results in unbalanced calls to the irq_enter() and
+ * irq_exit() functions), RCU will give you what you deserve, good and hard.
+ * But very infrequently and irreproducibly.
  *
  * Use things like work queues to work around this limitation.
  *
@@ -1113,23 +1105,14 @@ void rcu_nmi_enter(void)
  */
 void rcu_irq_enter(void)
 {
-	struct rcu_dynticks *rdtp;
-	long long newval;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	lockdep_assert_irqs_disabled();
-	rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	/* Page faults can happen in NMI handlers, so check... */
-	if (rdtp->dynticks_nmi_nesting)
-		return;
-
-	newval = rdtp->dynticks_nesting + 1;
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && newval == 0);
-	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick(TPS("++="), rdtp->dynticks_nesting, newval);
-	else
-		rcu_eqs_exit_common(newval, true);
-	rdtp->dynticks_nesting++;
+	if (rdtp->dynticks_nmi_nesting == 0)
+		rcu_dynticks_task_exit();
+	rcu_nmi_enter();
+	if (rdtp->dynticks_nmi_nesting == 1)
+		rcu_cleanup_after_idle();
 }
 
 /*
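For context, a rough caller-side sketch of how the two families of hooks are paired after this change. The example_*() helpers are hypothetical and exist only for this sketch; only rcu_irq_enter(), rcu_irq_exit(), rcu_nmi_enter(), and rcu_nmi_exit() are real kernel functions, and this is not actual arch or core-kernel code.

static void example_irq_path(void)	/* hypothetical caller */
{
	rcu_irq_enter();	/* now delegates to rcu_nmi_enter(), plus idle/nohz_full actions */
	/* ... interrupt handler body; RCU read-side critical sections are legal ... */
	rcu_irq_exit();		/* now delegates to rcu_nmi_exit(), plus idle-entry actions */
}

static void example_nmi_path(void)	/* hypothetical caller */
{
	rcu_nmi_enter();	/* NMI handlers keep using the nmi variants directly */
	/* ... NMI handler body ... */
	rcu_nmi_exit();		/* idle-related actions cannot always run in NMI context */
}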