rcu: Make ->dynticks_nesting be a simple counter
Now that ->dynticks_nesting counts only process-level dyntick-idle entry and exit, there is no need for the elaborate segmented counter with its guard fields and overflow checking. This commit therefore makes ->dynticks_nesting be a simple counter.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 58721f5da4
commit 51a1fd30f1
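Before the diff, a brief editorial sketch of the counting scheme the patch moves to: ->dynticks_nesting becomes a plain count of process-level reasons the CPU is non-idle (0 when idle, 1 for a running task), while ->dynticks_nmi_nesting keeps tracking irq/NMI nesting with the DYNTICK_IRQ_NONIDLE offset. The sketch below is an illustration written for this page under those assumptions, not the kernel code; the model_* names are hypothetical, and per-CPU accessors, the ->dynticks atomic, tracing, and memory ordering are all omitted.

/*
 * Editorial model of the simplified counter discipline (assumption-laden
 * sketch, not the kernel implementation).
 */
#include <limits.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE	((INT_MAX / 2) + 1)

struct rcu_dynticks_model {
	long long dynticks_nesting;	/* process-level non-idle reasons */
	int dynticks_nmi_nesting;	/* irq/NMI nesting, offset when non-idle */
};

/* Process-level entry into an extended quiescent state (idle). */
static void model_eqs_enter(struct rcu_dynticks_model *rdtp)
{
	rdtp->dynticks_nmi_nesting = 0;		/* crowbar, as in the patch */
	if (rdtp->dynticks_nesting == 1)
		rdtp->dynticks_nesting = 0;	/* really go idle */
	else
		rdtp->dynticks_nesting--;	/* nested entry: just count down */
}

/* Process-level exit from the extended quiescent state. */
static void model_eqs_exit(struct rcu_dynticks_model *rdtp)
{
	if (rdtp->dynticks_nesting) {
		rdtp->dynticks_nesting++;	/* already non-idle: count up */
	} else {
		rdtp->dynticks_nesting = 1;	/* initial exit from idle */
		rdtp->dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE;
	}
}

int main(void)
{
	struct rcu_dynticks_model rdtp = {
		.dynticks_nesting = 1,		/* CPU starts out non-idle */
		.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	};

	model_eqs_enter(&rdtp);			/* e.g. the idle loop */
	printf("idle: nesting=%lld\n", rdtp.dynticks_nesting);	/* 0 */
	model_eqs_exit(&rdtp);
	printf("busy: nesting=%lld\n", rdtp.dynticks_nesting);	/* 1 */
	return 0;
}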
@@ -30,32 +30,7 @@
 #define RCU_TRACE(stmt)
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-/*
- * Process-level increment to ->dynticks_nesting field.  This allows for
- * architectures that use half-interrupts and half-exceptions from
- * process context.
- *
- * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
- * that counts the number of process-based reasons why RCU cannot
- * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
- * is the value used to increment or decrement this field.
- *
- * The rest of the bits could in principle be used to count interrupts,
- * but this would mean that a negative-one value in the interrupt
- * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
- * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
- * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
- * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
- * initial exit from idle.
- */
-#define DYNTICK_TASK_NEST_WIDTH 7
-#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
-#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
-#define DYNTICK_TASK_FLAG	((DYNTICK_TASK_NEST_VALUE / 8) * 2)
-#define DYNTICK_TASK_MASK	((DYNTICK_TASK_NEST_VALUE / 8) * 3)
-#define DYNTICK_TASK_EXIT_IDLE	(DYNTICK_TASK_NEST_VALUE + \
-				 DYNTICK_TASK_FLAG)
-
+/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
 #define DYNTICK_IRQ_NONIDLE	((INT_MAX / 2) + 1)
 
 
@@ -265,7 +265,7 @@ void rcu_bh_qs(void)
 #endif
 
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
@@ -813,6 +813,10 @@ static void rcu_eqs_enter_common(bool user)
 /*
  * Enter an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
  */
 static void rcu_eqs_enter(bool user)
 {
@@ -821,11 +825,11 @@ static void rcu_eqs_enter(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		     rdtp->dynticks_nesting == 0);
+	if (rdtp->dynticks_nesting == 1)
 		rcu_eqs_enter_common(user);
 	else
-		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+		rdtp->dynticks_nesting--;
 }
 
 /**
@@ -836,10 +840,6 @@ static void rcu_eqs_enter(bool user)
  * critical sections can occur in irq handlers in idle, a possibility
  * handled by irq_enter() and irq_exit().)
  *
- * We crowbar the ->dynticks_nesting field to zero to allow for
- * the possibility of usermode upcalls having messed up our count
- * of interrupt nesting level during the prior busy period.
- *
  * If you add or remove a call to rcu_idle_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
@@ -984,6 +984,10 @@ static void rcu_eqs_exit_common(long long newval, int user)
 /*
  * Exit an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * allow for the possibility of usermode upcalls messing up our count of
+ * interrupt nesting level during the busy period that is just now starting.
  */
 static void rcu_eqs_exit(bool user)
 {
@@ -994,12 +998,12 @@ static void rcu_eqs_exit(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK) {
-		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	if (oldval) {
+		rdtp->dynticks_nesting++;
 	} else {
 		__this_cpu_inc(disable_rcu_irq_enter);
-		rcu_eqs_exit_common(DYNTICK_TASK_EXIT_IDLE, user);
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+		rcu_eqs_exit_common(1, user);
+		rdtp->dynticks_nesting = 1;
 		__this_cpu_dec(disable_rcu_irq_enter);
 		WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	}
@@ -1011,11 +1015,6 @@ static void rcu_eqs_exit(bool user)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
- * allow for the possibility of usermode upcalls messing up our count
- * of interrupt nesting level during the busy period that is just
- * now starting.
- *
  * If you add or remove a call to rcu_idle_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
@@ -1219,7 +1218,8 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
+	       __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
 }
 
 /*
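Editorial aside on the hunk above: with the simple counter, an interrupt taken from the idle loop would be expected to see dynticks_nesting == 0 together with dynticks_nmi_nesting == 1 (values assumed here for illustration), so the test now needs both fields where the old segmented counter answered with ->dynticks_nesting alone. A tiny stand-alone model of the new check, reusing the hypothetical model naming from the sketch above:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE	((INT_MAX / 2) + 1)

/* Model of the new two-field test; the kernel reads per-CPU state instead. */
static bool model_rrupt_from_idle(long long nesting, int nmi_nesting)
{
	return nesting <= 0 && nmi_nesting <= 1;
}

int main(void)
{
	/* Interrupt taken from the idle loop: no task, one irq level. */
	printf("from idle: %d\n", model_rrupt_from_idle(0, 1));
	/* Interrupt taken from a running task: both fields are elevated. */
	printf("from task: %d\n", model_rrupt_from_idle(1, DYNTICK_IRQ_NONIDLE + 1));
	return 0;
}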
@@ -3709,7 +3709,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
@@ -3738,7 +3738,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	if (rcu_segcblist_empty(&rdp->cblist) &&	/* No early-boot CBs? */
 	    !init_nocb_callback_list(rdp))
 		rcu_segcblist_init(&rdp->cblist);	/* Re-enable callbacks. */
-	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rdp->dynticks->dynticks_nesting = 1;
 	rcu_dynticks_eqs_online();
 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
@@ -39,7 +39,6 @@
  */
 struct rcu_dynticks {
 	long long dynticks_nesting;	/* Track irq/process nesting level. */
-					/* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
 	atomic_t dynticks;		/* Even value for idle, else odd. */
 	bool rcu_need_heavy_qs;		/* GP old, need heavy quiescent state. */