mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-02 00:24:12 +08:00
sched: Simplify set_affinity_pending refcounts
Now that we have set_affinity_pending::stop_pending to indicate if a
stopper is in progress, and we have the guarantee that if that stopper
exists, it will (eventually) complete our @pending, we can simplify the
refcount scheme by no longer counting the stopper thread.
Fixes: 6d337eab04
("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Cc: stable@kernel.org
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224131355.724130207@infradead.org
This commit is contained in:
parent
9e81889c76
commit
50caf9c14b
@ -1862,6 +1862,10 @@ struct migration_arg {
|
||||
struct set_affinity_pending *pending;
|
||||
};
|
||||
|
||||
/*
|
||||
* @refs: number of wait_for_completion()
|
||||
* @stop_pending: is @stop_work in use
|
||||
*/
|
||||
struct set_affinity_pending {
|
||||
refcount_t refs;
|
||||
unsigned int stop_pending;
|
||||
@ -1997,10 +2001,6 @@ out:
|
||||
if (complete)
|
||||
complete_all(&pending->done);
|
||||
|
||||
/* For pending->{arg,stop_work} */
|
||||
if (pending && refcount_dec_and_test(&pending->refs))
|
||||
wake_up_var(&pending->refs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2199,12 +2199,16 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
|
||||
push_task = get_task_struct(p);
|
||||
}
|
||||
|
||||
/*
|
||||
* If there are pending waiters, but no pending stop_work,
|
||||
* then complete now.
|
||||
*/
|
||||
pending = p->migration_pending;
|
||||
if (pending) {
|
||||
refcount_inc(&pending->refs);
|
||||
if (pending && !pending->stop_pending) {
|
||||
p->migration_pending = NULL;
|
||||
complete = true;
|
||||
}
|
||||
|
||||
task_rq_unlock(rq, p, rf);
|
||||
|
||||
if (push_task) {
|
||||
@ -2213,7 +2217,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
|
||||
}
|
||||
|
||||
if (complete)
|
||||
goto do_complete;
|
||||
complete_all(&pending->done);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2264,9 +2268,9 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
|
||||
if (!stop_pending)
|
||||
pending->stop_pending = true;
|
||||
|
||||
refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
|
||||
if (flags & SCA_MIGRATE_ENABLE)
|
||||
p->migration_flags &= ~MDF_PUSH;
|
||||
|
||||
task_rq_unlock(rq, p, rf);
|
||||
|
||||
if (!stop_pending) {
|
||||
@ -2282,12 +2286,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
|
||||
if (task_on_rq_queued(p))
|
||||
rq = move_queued_task(rq, rf, p, dest_cpu);
|
||||
|
||||
p->migration_pending = NULL;
|
||||
complete = true;
|
||||
if (!pending->stop_pending) {
|
||||
p->migration_pending = NULL;
|
||||
complete = true;
|
||||
}
|
||||
}
|
||||
task_rq_unlock(rq, p, rf);
|
||||
|
||||
do_complete:
|
||||
if (complete)
|
||||
complete_all(&pending->done);
|
||||
}
|
||||
@ -2295,7 +2300,7 @@ do_complete:
|
||||
wait_for_completion(&pending->done);
|
||||
|
||||
if (refcount_dec_and_test(&pending->refs))
|
||||
wake_up_var(&pending->refs);
|
||||
wake_up_var(&pending->refs); /* No UaF, just an address */
|
||||
|
||||
/*
|
||||
* Block the original owner of &pending until all subsequent callers
|
||||
@ -2303,6 +2308,9 @@ do_complete:
|
||||
*/
|
||||
wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
|
||||
|
||||
/* ARGH */
|
||||
WARN_ON_ONCE(my_pending.stop_pending);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user