Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-26 05:34:13 +08:00)
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
Changes suggested by review comments from Josh Triplett and Mathieu Desnoyers.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <20090827220012.GA30525@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent dd5d19bafd
commit 868489660d
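Two of the review-driven cleanups in the hunks below deserve a word of explanation. The first is the "avoid casts" change: task_struct's rcu_blocked_node field stops being a void * and becomes a struct rcu_node *, with a forward declaration of struct rcu_node added so the header does not need the full RCU definitions. The standalone sketch below illustrates that idiom; the simplified struct task and the helper functions are invented for illustration and are not the kernel code.

/*
 * Minimal sketch (not kernel code) of the "avoid casts" cleanup: keep a
 * typed pointer to a forward-declared struct instead of a void * that has
 * to be cast on every store and load.
 */
#include <stddef.h>

struct rcu_node;				/* forward declaration: enough for a pointer member */

struct task {					/* simplified stand-in for task_struct */
	struct rcu_node *rcu_blocked_node;	/* was: void *rcu_blocked_node; */
};

struct rcu_node {				/* full definition appears later */
	long gpnum;
};

static void block_task(struct task *t, struct rcu_node *rnp)
{
	t->rcu_blocked_node = rnp;		/* no (void *) cast needed */
}

static struct rcu_node *blocked_node(const struct task *t)
{
	return t->rcu_blocked_node;		/* no (struct rcu_node *) cast needed */
}

int main(void)
{
	struct rcu_node node = { .gpnum = 0 };
	struct task t = { .rcu_blocked_node = NULL };

	block_task(&t, &node);
	return blocked_node(&t) == &node ? 0 : 1;	/* exit 0 on success */
}

A forward declaration is all a pointer member needs, since the structure's layout only matters where the pointer is actually dereferenced.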
@@ -1163,6 +1163,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1208,7 +1210,7 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
-	void *rcu_blocked_node;
+	struct rcu_node *rcu_blocked_node;
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
@@ -229,7 +229,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 #endif /* #ifdef CONFIG_SMP */
 
 #ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
 
 /**
  * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -249,7 +248,7 @@ void rcu_enter_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting--;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 	local_irq_restore(flags);
 }
 
@@ -268,7 +267,7 @@ void rcu_exit_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	local_irq_restore(flags);
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
@@ -287,7 +286,7 @@ void rcu_nmi_enter(void)
 	if (rdtp->dynticks & 0x1)
 		return;
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -306,7 +305,7 @@ void rcu_nmi_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
 
 /**
@@ -322,7 +321,7 @@ void rcu_irq_enter(void)
 	if (rdtp->dynticks_nesting++)
 		return;
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -341,7 +340,7 @@ void rcu_irq_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
 	if (__get_cpu_var(rcu_sched_data).nxtlist ||
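The hunks above also cover the "fix/add warnings" part: the file-local rcu_rs ratelimit state is deleted and the dynticks sanity checks switch from WARN_ON_RATELIMIT() to WARN_ON_ONCE(), since a broken dynticks counter is a persistent condition and a single stack dump is enough. Below is a userspace approximation of the warn-once idiom for readers unfamiliar with it; warn_on_once() and the toy dynticks value are invented for the sketch and are not the kernel macros.

/*
 * Userspace approximation of the kernel's WARN_ON_ONCE(): complain about a
 * condition the first time it is seen, then stay quiet.  Uses a GCC/Clang
 * statement expression, like the kernel macro it imitates.
 */
#include <stdbool.h>
#include <stdio.h>

#define warn_on_once(cond)						\
	({								\
		static bool once_warned;				\
		bool cond_val = (cond);					\
		if (cond_val && !once_warned) {				\
			once_warned = true;				\
			fprintf(stderr, "warning: %s at %s:%d\n",	\
				#cond, __FILE__, __LINE__);		\
		}							\
		cond_val;						\
	})

int main(void)
{
	int dynticks = 2;	/* even value: pretend we are in dyntick-idle mode */
	int hits = 0;

	for (int i = 0; i < 3; i++)
		if (warn_on_once(!(dynticks & 0x1)))	/* prints only on the first pass */
			hits++;
	printf("condition seen %d times, warned once\n", hits);
	return 0;
}

Like the kernel's version, the sketch evaluates the condition exactly once and returns it, so it can still be used inside an if ().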
@@ -81,6 +81,8 @@ struct rcu_dynticks {
 struct rcu_node {
 	spinlock_t lock;
 	long	gpnum;		/* Current grace period for this node. */
+				/*  This will either be equal to or one */
+				/*  behind the root rcu_node's gpnum. */
 	unsigned long qsmask;	/* CPUs or groups that need to switch in */
 				/*  order for current grace period to proceed.*/
 	unsigned long qsmaskinit;
@@ -92,7 +92,7 @@ static void rcu_preempt_qs(int cpu)
 		rnp = rdp->mynode;
 		spin_lock(&rnp->lock);
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
-		t->rcu_blocked_node = (void *)rnp;
+		t->rcu_blocked_node = rnp;
 
 		/*
 		 * If this CPU has already checked in, then this task
@@ -176,9 +176,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		 * most one time. So at most two passes through loop.
 		 */
 		for (;;) {
-			rnp = (struct rcu_node *)t->rcu_blocked_node;
+			rnp = t->rcu_blocked_node;
 			spin_lock(&rnp->lock);
-			if (rnp == (struct rcu_node *)t->rcu_blocked_node)
+			if (rnp == t->rcu_blocked_node)
 				break;
 			spin_unlock(&rnp->lock);
 		}
@@ -288,8 +288,10 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
-	if (rnp == rnp_root)
+	if (rnp == rnp_root) {
+		WARN_ONCE(1, "Last CPU thought to be offlined?");
 		return;  /* Shouldn't happen: at least one CPU online. */
+	}
 
 	/*
 	 * Move tasks up to root rcu_node. Rely on the fact that the