commit 4f64a6c9f6
Syzkaller triggered a WARN in put_pmu_ctx().
WARNING: CPU: 1 PID: 2245 at kernel/events/core.c:4925 put_pmu_ctx+0x1f0/0x278
This is because there is no locking around the "if (!epc->ctx)" check in
find_get_pmu_context() and the clearing of epc->ctx to NULL in
put_pmu_ctx().

The decrement of the reference count in put_pmu_ctx() also happens
outside of the spinlock, making the following order of events possible,
where the context is cleared in put_pmu_ctx() even though its refcount
is non-zero:
  CPU0                                          CPU1
  find_get_pmu_context()
    if (!epc->ctx) == false
                                                put_pmu_ctx()
                                                  atomic_dec_and_test(&epc->refcount) == true
                                                  epc->refcount == 0
    atomic_inc(&epc->refcount);
    epc->refcount == 1
                                                list_del_init(&epc->pmu_ctx_entry);
                                                epc->ctx = NULL;
Another issue is that the WARN_ON for no active PMU events in
put_pmu_ctx() is outside of the lock. If the perf_event_pmu_context is
an embedded one, it is not deleted even after being cleared and can be
re-used, so the warning can trigger. For this reason it also needs to
be moved inside the lock.
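As a rough sketch of the direction of the fix (field names are taken from
the description above, ctx->lock is assumed to be a raw_spinlock_t, and
the actual hunk in kernel/events/core.c is not reproduced here),
put_pmu_ctx() can use the atomic_dec_and_raw_lock*() helpers shown below
so that the final decrement and the taking of ctx->lock become one atomic
step, and the unlinking, the clearing of epc->ctx and the WARN all run
under the lock:

static void put_pmu_ctx(struct perf_event_pmu_context *epc)
{
        struct perf_event_context *ctx = epc->ctx;
        unsigned long flags;

        /*
         * Sketch only: returns with ctx->lock held (IRQs off) only when
         * the count really dropped to zero, so a concurrent
         * find_get_pmu_context() can no longer observe refcount == 0 and
         * re-increment it while the context is being torn down.
         */
        if (!_atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, &flags))
                return;

        /* Refcount is zero and ctx->lock is held: unlink and clear. */
        list_del_init(&epc->pmu_ctx_entry);
        epc->ctx = NULL;

        raw_spin_unlock_irqrestore(&ctx->lock, flags);
}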
The above warning is very quick to trigger on Arm by running these two
commands at the same time:
while true; do perf record -- ls; done
while true; do perf record -- ls; done
[peterz: atomic_dec_and_raw_lock*()]
Fixes: bd27568117 ("perf: Rewrite core context handling")
Reported-by: syzbot+697196bc0265049822bd@syzkaller.appspotmail.com
Signed-off-by: James Clark <james.clark@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20230127143141.1782804-2-james.clark@arm.com
83 lines, 2.0 KiB, C
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *      if (atomic_dec_and_test(&atomic)) {
 *              spin_lock(&lock);
 *              return 1;
 *      }
 *      return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        spin_lock(lock);
        if (atomic_dec_and_test(atomic))
                return 1;
        spin_unlock(lock);
        return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);

int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                 unsigned long *flags)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        spin_lock_irqsave(lock, *flags);
        if (atomic_dec_and_test(atomic))
                return 1;
        spin_unlock_irqrestore(lock, *flags);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);

int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        raw_spin_lock(lock);
        if (atomic_dec_and_test(atomic))
                return 1;
        raw_spin_unlock(lock);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock);

int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
                                     unsigned long *flags)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        raw_spin_lock_irqsave(lock, *flags);
        if (atomic_dec_and_test(atomic))
                return 1;
        raw_spin_unlock_irqrestore(lock, *flags);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
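For completeness, a minimal illustrative caller of the new raw-lock
irqsave variant: struct foo, foo_list_lock and foo_put() are made-up
names, and it is assumed the declaration is reachable via
<linux/spinlock.h> next to the existing atomic_dec_and_lock() ones
(in-tree users would normally go through the corresponding wrapper
macros rather than the underscored function). The helper returns
non-zero with the lock held and interrupts disabled only when the count
actually reached zero, so the whole teardown runs under the lock.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object on a list protected by a raw spinlock. */
struct foo {
        atomic_t refcount;
        struct list_head node;
};

static DEFINE_RAW_SPINLOCK(foo_list_lock);

static void foo_put(struct foo *f)
{
        unsigned long flags;

        /* Fast path: the count did not reach zero, nothing more to do. */
        if (!_atomic_dec_and_raw_lock_irqsave(&f->refcount, &foo_list_lock, &flags))
                return;

        /*
         * Count is zero and foo_list_lock is held with IRQs disabled:
         * unlink and free with no window for a new reference to appear.
         */
        list_del(&f->node);
        raw_spin_unlock_irqrestore(&foo_list_lock, flags);
        kfree(f);
}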