Merge branch 'urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull RCU fixes from Paul McKenney:

 - fix regressions induced by a merge-window change in scheduler
   semantics, which means that smp_processor_id() can no longer be used
   in kthreads that use simple affinity to bind themselves to a specific
   CPU

 - fix a bug in Tasks Trace RCU that was thought to be strictly
   theoretical. However, production workloads have started hitting it,
   so these fixes need to be merged sooner rather than later

 - fix a minor printk()-format-mismatch issue introduced during the
   merge window

* 'urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  rcu: Fix pr_info() formats and values in show_rcu_gp_kthreads()
  rcu-tasks: Don't delete holdouts within trc_wait_for_one_reader()
  rcu-tasks: Don't delete holdouts within trc_inspect_reader()
  refscale: Avoid false-positive warnings in ref_scale_reader()
  scftorture: Avoid false-positive warnings in scftorture_invoker()
commit 6e442d0662
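Background on the first fix: smp_processor_id() is only meaningful when the caller cannot migrate, and after the merge-window scheduler change its debug check no longer accepts a kthread that has merely affined itself to one CPU. The torture-test kthreads below use the CPU number only for log messages and an explicit cross-check, so the preemption-blind raw_smp_processor_id() suffices. A minimal sketch of the distinction, using a hypothetical function (not kernel code):

	#include <linux/preempt.h>
	#include <linux/printk.h>
	#include <linux/smp.h>

	/* Hypothetical illustration of the two accessors. */
	static void where_am_i(void)
	{
		int cpu;

		preempt_disable();
		cpu = smp_processor_id();	/* valid: migration is impossible here */
		pr_info("pinned read: cpu %d\n", cpu);
		preempt_enable();

		/* Valid in any context, but only advisory: the task may
		 * migrate immediately after the read.  Fine for logging. */
		pr_info("advisory read: cpu %d\n", raw_smp_processor_id());
	}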
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -487,13 +487,13 @@ ref_scale_reader(void *arg)
 	s64 duration;
 
 	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
-	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
 	set_user_nice(current, MAX_NICE);
 	atomic_inc(&n_init);
 	if (holdoff)
 		schedule_timeout_interruptible(holdoff * HZ);
 repeat:
-	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());
+	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());
 
 	// Wait for signal that this reader can start.
 	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
@@ -503,7 +503,7 @@ repeat:
 		goto end;
 
 	// Make sure that the CPU is affinitized appropriately during testing.
-	WARN_ON_ONCE(smp_processor_id() != me);
+	WARN_ON_ONCE(raw_smp_processor_id() != me);
 
 	WRITE_ONCE(rt->start_reader, 0);
 	if (!atomic_dec_return(&n_started))
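The two refscale hunks above converge on a common pattern for CPU-bound test kthreads: check the return value of set_cpus_allowed_ptr(), which can fail (for example, when the target CPU is offline), and read the CPU number with raw_smp_processor_id(), relying on the explicit WARN against the expected CPU rather than on the preemption check. A hedged sketch of that pattern, with hypothetical names:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/smp.h>

	/* Hypothetical kthread body showing the pattern; not kernel code. */
	static int bound_tester(void *arg)
	{
		int cpu = (long)arg % nr_cpu_ids;

		/* Binding can fail, so warn rather than silently proceeding. */
		WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));

		while (!kthread_should_stop()) {
			/* The cross-check catches a failed or lost binding. */
			WARN_ON_ONCE(raw_smp_processor_id() != cpu);
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}

The scftorture_invoker() hunk further down applies the same two adjustments.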
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -953,10 +953,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		in_qs = likely(!t->trc_reader_nesting);
 	}
 
-	// Mark as checked. Because this is called from the grace-period
-	// kthread, also remove the task from the holdout list.
+	// Mark as checked so that the grace-period kthread will
+	// remove it from the holdout list.
 	t->trc_reader_checked = true;
-	trc_del_holdout(t);
 
 	if (in_qs)
 		return true;  // Already in quiescent state, done!!!
@@ -983,7 +982,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	// The current task had better be in a quiescent state.
 	if (t == current) {
 		t->trc_reader_checked = true;
-		trc_del_holdout(t);
 		WARN_ON_ONCE(t->trc_reader_nesting);
 		return;
 	}
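The two rcu-tasks hunks above address the holdout-list bug: trc_inspect_reader() and trc_wait_for_one_reader() can run concurrently with the grace-period kthread's own scan of the holdout list, so deleting a task from that list in those paths risks corrupting it. The fix defers all deletions to the single scanner, keyed off ->trc_reader_checked. A simplified, hedged sketch of the resulting single-deleter shape (not the kernel's exact code; trc_del_holdout() is internal to kernel/rcu/tasks.h):

	#include <linux/list.h>
	#include <linux/sched.h>

	/* Simplified sketch: only the grace-period kthread, walking the
	 * holdout list, removes entries; every other context merely marks
	 * the task as checked. */
	static void scan_holdouts_sketch(struct list_head *hop)
	{
		struct task_struct *t, *tmp;

		list_for_each_entry_safe(t, tmp, hop, trc_holdout_list)
			if (smp_load_acquire(&t->trc_reader_checked))
				trc_del_holdout(t);	/* single deleter: no race */
	}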
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -795,9 +795,9 @@ void show_rcu_gp_kthreads(void)
 	jr = j - data_race(rcu_state.gp_req_activity);
 	js = j - data_race(rcu_state.gp_start);
 	jw = j - data_race(rcu_state.gp_wake_time);
-	pr_info("%s: wait state: %s(%d) ->state: %#lx ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
+	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
 		rcu_state.name, gp_state_getname(rcu_state.gp_state),
-		rcu_state.gp_state, t ? t->__state : 0x1ffffL, t ? t->rt_priority : 0xffU,
+		rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
 		js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
 		(long)data_race(rcu_state.gp_seq),
 		(long)data_race(rcu_get_root()->gp_seq_needed),
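The pr_info() hunk above is the printk()-format fix: task_struct::__state became an unsigned int during the same merge window, so the %#lx specifier and the unsigned-long fallback constant 0x1ffffL no longer matched the argument and tripped format checking. A minimal illustration of the matching rule (hypothetical helper, not kernel code):

	#include <linux/printk.h>
	#include <linux/sched.h>

	/* Hypothetical helper: the specifier must match the promoted
	 * argument type, or the compiler's format checking complains. */
	static void show_state(struct task_struct *t)
	{
		pr_info("->state: %#x\n", t ? t->__state : 0x1ffff);	/* unsigned int: OK */
		/* pr_info("->state: %#lx\n", ...);  -- mismatch: wants unsigned long */
	}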
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -405,15 +405,15 @@ static int scftorture_invoker(void *arg)
 
 	VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
 	cpu = scfp->cpu % nr_cpu_ids;
-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
 	set_user_nice(current, MAX_NICE);
 	if (holdoff)
 		schedule_timeout_interruptible(holdoff * HZ);
 
-	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());
+	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());
 
 	// Make sure that the CPU is affinitized appropriately during testing.
-	curcpu = smp_processor_id();
+	curcpu = raw_smp_processor_id();
 	WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
 		 "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
 		 __func__, scfp->cpu, curcpu, nr_cpu_ids);