rcu: Remove rsp parameter from no-CBs CPU functions
There is now only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_nocb_cpu_needs_barrier(), rcu_spawn_one_nocb_kthread(), rcu_organize_nocb_kthreads(), and rcu_nohz_full_cpu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
parent b21ebed951
commit 4580b0541b
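For context, the change applies a common refactoring: once a structure can only ever have one instance, a pointer parameter that always receives that instance carries no information, so the parameter can be dropped and the callee can reference the global directly. Below is a minimal, self-contained sketch of that pattern outside the kernel; the names (demo_state, demo_needs_work) are illustrative only and do not appear in the RCU code.

/* Illustrative sketch only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct demo_state {
	const char *name;
	unsigned long gp_start;
};

/* The single remaining instance, analogous to the one rcu_state. */
static struct demo_state demo_state = { .name = "demo", .gp_start = 0 };

/*
 * Before: static bool demo_needs_work(struct demo_state *sp, int cpu);
 * Every caller passed &demo_state, so the parameter was redundant.
 * After: the parameter is gone and the function uses the global directly.
 */
static bool demo_needs_work(int cpu)
{
	return (cpu % 2) == 0 && demo_state.gp_start == 0;
}

int main(void)
{
	int cpu;

	/* Callers simply drop the now-removed argument. */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %d needs work: %d\n", cpu, demo_needs_work(cpu));
	return 0;
}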
@@ -3095,7 +3095,7 @@ static int rcu_pending(void)
 	check_cpu_stall(rdp);
 
 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
-	if (rcu_nohz_full_cpu(&rcu_state))
+	if (rcu_nohz_full_cpu())
 		return 0;
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
@@ -3246,7 +3246,7 @@ static void _rcu_barrier(void)
 			continue;
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
-			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
 				_rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
 						   rsp->barrier_sequence);
 			} else {
@@ -476,7 +476,7 @@ static void print_cpu_stall_info(int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
+static bool rcu_nocb_cpu_needs_barrier(int cpu);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
@@ -491,11 +491,11 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_all_nocb_kthreads(int cpu);
 static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+static void __init rcu_organize_nocb_kthreads(void);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
 static void rcu_dynticks_task_exit(void);
 
@@ -1960,7 +1960,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
  * Does the specified CPU need an RCU callback for the specified flavor
  * of rcu_barrier()?
  */
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	unsigned long ret;
@@ -2424,7 +2424,7 @@ void __init rcu_init_nohz(void)
 	for_each_rcu_flavor(rsp) {
 		for_each_cpu(cpu, rcu_nocb_mask)
 			init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
-		rcu_organize_nocb_kthreads(rsp);
+		rcu_organize_nocb_kthreads();
 	}
 }
 
@@ -2444,7 +2444,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  * brought online out of order, this can require re-organizing the
  * leader-follower relationships.
  */
-static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+static void rcu_spawn_one_nocb_kthread(int cpu)
 {
 	struct rcu_data *rdp;
 	struct rcu_data *rdp_last;
@@ -2481,7 +2481,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 
 	/* Spawn the kthread for this CPU and RCU flavor. */
 	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
-			"rcuo%c/%d", rsp->abbr, cpu);
+			"rcuo%c/%d", rcu_state.abbr, cpu);
 	BUG_ON(IS_ERR(t));
 	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
 }
@@ -2496,7 +2496,7 @@ static void rcu_spawn_all_nocb_kthreads(int cpu)
 
 	if (rcu_scheduler_fully_active)
 		for_each_rcu_flavor(rsp)
-			rcu_spawn_one_nocb_kthread(rsp, cpu);
+			rcu_spawn_one_nocb_kthread(cpu);
 }
 
 /*
@@ -2520,7 +2520,7 @@ module_param(rcu_nocb_leader_stride, int, 0444);
 /*
  * Initialize leader-follower relationships for all no-CBs CPU.
  */
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(void)
 {
 	int cpu;
 	int ls = rcu_nocb_leader_stride;
@@ -2579,7 +2579,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
 {
 	WARN_ON_ONCE(1); /* Should be dead code. */
 	return false;
@@ -2648,12 +2648,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
  * This code relies on the fact that all NO_HZ_FULL CPUs are also
  * CONFIG_RCU_NOCB_CPU CPUs.
  */
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+static bool rcu_nohz_full_cpu(void)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_cpu(smp_processor_id()) &&
 	    (!rcu_gp_in_progress() ||
-	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
+	     ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
 		return true;
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 	return false;