Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-17 17:24:17 +08:00
b027789e5e
Kevin is reporting crashes which point to a use-after-free of a cfs_rq
in update_blocked_averages(). Initial debugging revealed that we have
live cfs_rq's (on_list=1) in an about to be kfree()'d task group in
free_fair_sched_group(). However, it was unclear how that can happen.

His kernel config happened to lead to a layout of struct sched_entity
that put the 'my_q' member directly into the middle of the object,
which makes it incidentally overlap with SLUB's freelist pointer. That,
in combination with SLAB_FREELIST_HARDENED's freelist pointer mangling,
leads to a reliable access violation in the form of a #GP, which made
the UAF fail fast.

Michal seems to have run into the same issue[1]. He already correctly
diagnosed that commit a7b359fc6a ("sched/fair: Correctly insert
cfs_rq's to list on unthrottle") is causing the preconditions for the
UAF to happen by re-adding cfs_rq's also to task groups that have no
more running tasks, i.e. also to dead ones. His analysis, however,
misses the real root cause and it cannot be seen from the crash
backtrace only, as the real offender is tg_unthrottle_up() getting
called via sched_cfs_period_timer() via the timer interrupt at an
inconvenient time.

When unregister_fair_sched_group() unlinks all cfs_rq's from the dying
task group, it doesn't protect itself from getting interrupted. If the
timer interrupt triggers while we iterate over all CPUs, or after
unregister_fair_sched_group() has finished but prior to unlinking the
task group, sched_cfs_period_timer() will execute and walk the list of
task groups, trying to unthrottle cfs_rq's, i.e. re-add them to the
dying task group. These will later -- in free_fair_sched_group() -- be
kfree()'ed while still being linked, leading to the fireworks Kevin
and Michal are seeing.

To fix this race, ensure the dying task group gets unlinked first.
However, simply switching the order of unregistering and unlinking the
task group isn't sufficient, as concurrent RCU walkers might still see
it, as can be seen below:

    CPU1:                                    CPU2:
      :                                      timer IRQ:
      :                                        do_sched_cfs_period_timer():
      :                                          :
      :                                          distribute_cfs_runtime():
      :                                            rcu_read_lock();
      :                                            :
      :                                            unthrottle_cfs_rq():
    sched_offline_group():                           :
      :                                              walk_tg_tree_from(…,tg_unthrottle_up,…):
      list_del_rcu(&tg->list);                         :
 (1)  :                                                list_for_each_entry_rcu(child, &parent->children, siblings)
      :                                                  :
 (2)  list_del_rcu(&tg->siblings);                       :
      :                                                  tg_unthrottle_up():
      unregister_fair_sched_group():                       struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
        :                                                  :
        list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);             :
        :                                                  :
        :                                                  if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
        :                                        (3)         list_add_leaf_cfs_rq(cfs_rq);
        :                                                    :
        :                                                  :
        :                                                :
        :                                              :
        :                                            :
  (4)   :                                            rcu_read_unlock();

CPU 2 walks the task group list in parallel to sched_offline_group();
specifically, it'll read the soon to be unlinked task group entry at
(1). Unlinking it on CPU 1 at (2) therefore won't prevent CPU 2 from
still passing it on to tg_unthrottle_up(). CPU 1 now tries to unlink
all cfs_rq's via list_del_leaf_cfs_rq() in
unregister_fair_sched_group(). Meanwhile CPU 2 will re-add some of
these at (3), which is the cause of the UAF later on.

To prevent this additional race from happening, we need to wait until
walk_tg_tree_from() has finished traversing the task groups, i.e.
after the RCU read critical section ends in (4). Afterwards we're safe
to call unregister_fair_sched_group(), as each new walk won't see the
dying task group any more.

On top of that, we need to wait yet another RCU grace period after
unregister_fair_sched_group() to ensure print_cfs_stats(), which might
run concurrently, always sees valid objects, i.e. not already free'd
ones.

This patch survives Michal's reproducer[2] for 8h+ now, which used to
trigger within minutes before.

[1] https://lore.kernel.org/lkml/20211011172236.11223-1-mkoutny@suse.com/
[2] https://lore.kernel.org/lkml/20211102160228.GA57072@blackbody.suse.cz/

Fixes: a7b359fc6a ("sched/fair: Correctly insert cfs_rq's to list on unthrottle")
[peterz: shuffle code around a bit]
Reported-by: Kevin Tanguy <kevin.tanguy@corp.ovh.com>
Signed-off-by: Mathias Krause <minipli@grsecurity.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
269 lines · 6.4 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Auto-group scheduling implementation:
 */
#include <linux/nospec.h>
#include "sched.h"

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

void __init autogroup_init(struct task_struct *init_task)
{
        autogroup_default.tg = &root_task_group;
        kref_init(&autogroup_default.kref);
        init_rwsem(&autogroup_default.lock);
        init_task->signal->autogroup = &autogroup_default;
}

void autogroup_free(struct task_group *tg)
{
        kfree(tg->autogroup);
}

static inline void autogroup_destroy(struct kref *kref)
{
        struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
        /* We've redirected RT tasks to the root task group... */
        ag->tg->rt_se = NULL;
        ag->tg->rt_rq = NULL;
#endif
        sched_release_group(ag->tg);
        sched_destroy_group(ag->tg);
}

static inline void autogroup_kref_put(struct autogroup *ag)
{
        kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
        kref_get(&ag->kref);
        return ag;
}

static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
        struct autogroup *ag;
        unsigned long flags;

        if (!lock_task_sighand(p, &flags))
                return autogroup_kref_get(&autogroup_default);

        ag = autogroup_kref_get(p->signal->autogroup);
        unlock_task_sighand(p, &flags);

        return ag;
}

static inline struct autogroup *autogroup_create(void)
{
        struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
        struct task_group *tg;

        if (!ag)
                goto out_fail;

        tg = sched_create_group(&root_task_group);
        if (IS_ERR(tg))
                goto out_free;

        kref_init(&ag->kref);
        init_rwsem(&ag->lock);
        ag->id = atomic_inc_return(&autogroup_seq_nr);
        ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * Autogroup RT tasks are redirected to the root task group
         * so we don't have to move tasks around upon policy change,
         * or flail around trying to allocate bandwidth on the fly.
         * A bandwidth exception in __sched_setscheduler() allows
         * the policy change to proceed.
         */
        free_rt_sched_group(tg);
        tg->rt_se = root_task_group.rt_se;
        tg->rt_rq = root_task_group.rt_rq;
#endif
        tg->autogroup = ag;

        sched_online_group(tg, &root_task_group);
        return ag;

out_free:
        kfree(ag);
out_fail:
        if (printk_ratelimit()) {
                printk(KERN_WARNING "autogroup_create: %s failure.\n",
                        ag ? "sched_create_group()" : "kzalloc()");
        }

        return autogroup_kref_get(&autogroup_default);
}

bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
        if (tg != &root_task_group)
                return false;
        /*
         * If we race with autogroup_move_group() the caller can use the old
         * value of signal->autogroup but in this case sched_move_task() will
         * be called again before autogroup_kref_put().
         *
         * However, there is no way sched_autogroup_exit_task() could tell us
         * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
         */
        if (p->flags & PF_EXITING)
                return false;

        return true;
}

void sched_autogroup_exit_task(struct task_struct *p)
{
        /*
         * We are going to call exit_notify() and autogroup_move_group() can't
         * see this thread after that: we can no longer use signal->autogroup.
         * See the PF_EXITING check in task_wants_autogroup().
         */
        sched_move_task(p);
}

static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
        struct autogroup *prev;
        struct task_struct *t;
        unsigned long flags;

        BUG_ON(!lock_task_sighand(p, &flags));

        prev = p->signal->autogroup;
        if (prev == ag) {
                unlock_task_sighand(p, &flags);
                return;
        }

        p->signal->autogroup = autogroup_kref_get(ag);
        /*
         * We can't avoid sched_move_task() after we changed signal->autogroup,
         * this process can already run with task_group() == prev->tg or we can
         * race with cgroup code which can read autogroup = prev under rq->lock.
         * In the latter case for_each_thread() can not miss a migrating thread,
         * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
         * can't be removed from thread list, we hold ->siglock.
         *
         * If an exiting thread was already removed from thread list we rely on
         * sched_autogroup_exit_task().
         */
        for_each_thread(p, t)
                sched_move_task(t);

        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
void sched_autogroup_create_attach(struct task_struct *p)
{
        struct autogroup *ag = autogroup_create();

        autogroup_move_group(p, ag);

        /* Drop extra reference added by autogroup_create(): */
        autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock. Currently has no users: */
void sched_autogroup_detach(struct task_struct *p)
{
        autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

void sched_autogroup_fork(struct signal_struct *sig)
{
        sig->autogroup = autogroup_task_get(current);
}

void sched_autogroup_exit(struct signal_struct *sig)
{
        autogroup_kref_put(sig->autogroup);
}

static int __init setup_autogroup(char *str)
{
        sysctl_sched_autogroup_enabled = 0;

        return 1;
}
__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
        static unsigned long next = INITIAL_JIFFIES;
        struct autogroup *ag;
        unsigned long shares;
        int err, idx;

        if (nice < MIN_NICE || nice > MAX_NICE)
                return -EINVAL;

        err = security_task_setnice(current, nice);
        if (err)
                return err;

        if (nice < 0 && !can_nice(current, nice))
                return -EPERM;

        /* This is a heavy operation, taking global locks.. */
        if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
                return -EAGAIN;

        next = HZ / 10 + jiffies;
        ag = autogroup_task_get(p);

        idx = array_index_nospec(nice + 20, 40);
        shares = scale_load(sched_prio_to_weight[idx]);

        down_write(&ag->lock);
        err = sched_group_set_shares(ag->tg, shares);
        if (!err)
                ag->nice = nice;
        up_write(&ag->lock);

        autogroup_kref_put(ag);

        return err;
}

void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
        struct autogroup *ag = autogroup_task_get(p);

        if (!task_group_is_autogroup(ag->tg))
                goto out;

        down_read(&ag->lock);
        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
        up_read(&ag->lock);

out:
        autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
        if (!task_group_is_autogroup(tg))
                return 0;

        return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}