Merge branch 'printk-rework' into for-linus

Petr Mladek 2021-06-29 09:53:17 +02:00
commit 94f2be50ba
3 changed files with 159 additions and 36 deletions

include/linux/printk.h

@@ -282,6 +282,47 @@ static inline void printk_safe_flush_on_panic(void)
 }
 #endif
 
+#ifdef CONFIG_SMP
+extern int __printk_cpu_trylock(void);
+extern void __printk_wait_on_cpu_lock(void);
+extern void __printk_cpu_unlock(void);
+
+/**
+ * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+ *                             lock and disable interrupts.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ *         to be passed to printk_cpu_unlock_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ */
+#define printk_cpu_lock_irqsave(flags)          \
+        for (;;) {                              \
+                local_irq_save(flags);          \
+                if (__printk_cpu_trylock())     \
+                        break;                  \
+                local_irq_restore(flags);       \
+                __printk_wait_on_cpu_lock();    \
+        }
+
+/**
+ * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+ *                                  lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ */
+#define printk_cpu_unlock_irqrestore(flags)     \
+        do {                                    \
+                __printk_cpu_unlock();          \
+                local_irq_restore(flags);       \
+        } while (0)
+
+#else
+
+#define printk_cpu_lock_irqsave(flags) ((void)flags)
+#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
+
+#endif /* CONFIG_SMP */
+
 extern int kptr_restrict;
 
 /**

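For reference, a caller pairs the two macros around a multi-line burst of output so that the burst is not interleaved with output from other CPUs taking this same lock. A minimal sketch (dump_regs_example() and its messages are illustrative, not part of this commit):

static void dump_regs_example(void)     /* hypothetical caller */
{
        unsigned long flags;

        /* Spins (with interrupts briefly re-enabled) until this CPU owns the lock. */
        printk_cpu_lock_irqsave(flags);

        /* These lines stay together relative to other printk cpu-lock users. */
        printk("CPU%d diagnostic dump:\n", smp_processor_id());
        printk("  caller: %pS\n", __builtin_return_address(0));

        printk_cpu_unlock_irqrestore(flags);
}

Note the lock only serializes against other users of this lock; unrelated printk() callers are not blocked by it.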
kernel/printk/printk.c

@@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+        do {
+                cpu_relax();
+        } while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+        int cpu;
+        int old;
+
+        cpu = smp_processor_id();
+
+        /*
+         * Guarantee loads and stores from this CPU when it is the lock owner
+         * are _not_ visible to the previous lock owner. This pairs with
+         * __printk_cpu_unlock:B.
+         *
+         * Memory barrier involvement:
+         *
+         * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+         * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+         *
+         * Relies on:
+         *
+         * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+         * of the previous CPU
+         *    matching
+         * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+         * of this CPU
+         */
+        old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+                                     cpu); /* LMM(__printk_cpu_trylock:A) */
+        if (old == -1) {
+                /*
+                 * This CPU is now the owner and begins loading/storing
+                 * data: LMM(__printk_cpu_trylock:B)
+                 */
+                return 1;
+
+        } else if (old == cpu) {
+                /* This CPU is already the owner. */
+                atomic_inc(&printk_cpulock_nested);
+                return 1;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
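The printk_cpulock_nested counter is what makes the trylock reentrant on a single CPU: if an interrupt or NMI fires while this CPU already owns the lock, a fresh acquisition still succeeds and only bumps the counter. A hedged sketch of that situation (the handler below is hypothetical, not part of this commit):

/* Hypothetical NMI/interrupt handler firing while this CPU holds the lock. */
static void example_backtrace_handler(void)
{
        unsigned long flags;

        /*
         * __printk_cpu_trylock() sees printk_cpulock_owner == this CPU,
         * so the spin loop exits immediately; printk_cpulock_nested: 0 -> 1.
         */
        printk_cpu_lock_irqsave(flags);
        printk("nested dump on CPU%d\n", smp_processor_id());
        /* Drops printk_cpulock_nested back to 0; ownership is not released. */
        printk_cpu_unlock_irqrestore(flags);
}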
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+        if (atomic_read(&printk_cpulock_nested)) {
+                atomic_dec(&printk_cpulock_nested);
+                return;
+        }
+
+        /*
+         * This CPU is finished loading/storing data:
+         * LMM(__printk_cpu_unlock:A)
+         */
+
+        /*
+         * Guarantee loads and stores from this CPU when it was the
+         * lock owner are visible to the next lock owner. This pairs
+         * with __printk_cpu_trylock:A.
+         *
+         * Memory barrier involvement:
+         *
+         * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+         * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+         *
+         * Relies on:
+         *
+         * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+         * of this CPU
+         *    matching
+         * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+         * of the next CPU
+         */
+        atomic_set_release(&printk_cpulock_owner,
+                           -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
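The owner/nested scheme is small enough to model outside the kernel. Below is a hedged userspace sketch using C11 atomics and pthreads, with threads standing in for CPUs; it mirrors the acquire/release handoff above but is an illustration, not kernel code:

/* cpu_lock_model.c - userspace model of the printk cpu lock handoff.
 * Build: cc -std=c11 -pthread cpu_lock_model.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int owner = -1;   /* models printk_cpulock_owner */
static int nested;              /* models printk_cpulock_nested; only the owner touches it */

static bool cpu_trylock(int cpu)
{
        int unlocked = -1;

        /* Acquire pairs with the release in cpu_unlock(), like cmpxchg_acquire. */
        if (atomic_compare_exchange_strong_explicit(&owner, &unlocked, cpu,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
                return true;
        if (unlocked == cpu) {          /* already the owner: reentrant path */
                nested++;
                return true;
        }
        return false;
}

static void cpu_unlock(void)
{
        if (nested) {
                nested--;
                return;
        }
        /* Release pairs with the acquire in cpu_trylock(), like atomic_set_release. */
        atomic_store_explicit(&owner, -1, memory_order_release);
}

static void *worker(void *arg)
{
        int cpu = (int)(long)arg;

        while (!cpu_trylock(cpu))
                ;                       /* spin, like __printk_wait_on_cpu_lock() */
        printf("cpu %d line 1\n", cpu);
        printf("cpu %d line 2\n", cpu); /* the pair is never split by the other thread */
        cpu_unlock();
        return NULL;
}

int main(void)
{
        pthread_t t[2];

        for (long i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}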

lib/dump_stack.c

@@ -84,50 +84,16 @@ static void __dump_stack(void)
  *
  * Architectures can override this implementation by implementing its own.
  */
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
 asmlinkage __visible void dump_stack(void)
 {
         unsigned long flags;
-        int was_locked;
-        int old;
-        int cpu;
 
-        /*
-         * Permit this cpu to perform nested stack dumps while serialising
-         * against other CPUs
-         */
-retry:
-        local_irq_save(flags);
-        cpu = smp_processor_id();
-        old = atomic_cmpxchg(&dump_lock, -1, cpu);
-        if (old == -1) {
-                was_locked = 0;
-        } else if (old == cpu) {
-                was_locked = 1;
-        } else {
-                local_irq_restore(flags);
-                /*
-                 * Wait for the lock to release before jumping to
-                 * atomic_cmpxchg() in order to mitigate the thundering herd
-                 * problem.
-                 */
-                do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
-                goto retry;
-        }
-
+        printk_cpu_lock_irqsave(flags);
         __dump_stack();
-
-        if (!was_locked)
-                atomic_set(&dump_lock, -1);
-
-        local_irq_restore(flags);
+        printk_cpu_unlock_irqrestore(flags);
 }
-#else
-asmlinkage __visible void dump_stack(void)
-{
-        __dump_stack();
-}
-#endif
 EXPORT_SYMBOL(dump_stack);
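With the open-coded dump_lock gone, the serialization, reentrancy, and thundering-herd logic live in one place and are shared with any other user of the printk cpu lock. On SMP, the new dump_stack() effectively expands to the following (an illustrative expansion of the two macros, not code from this commit):

asmlinkage __visible void dump_stack(void)
{
        unsigned long flags;

        for (;;) {                              /* printk_cpu_lock_irqsave(flags) */
                local_irq_save(flags);
                if (__printk_cpu_trylock())
                        break;
                local_irq_restore(flags);
                __printk_wait_on_cpu_lock();    /* herd mitigation now lives here */
        }
        __dump_stack();
        __printk_cpu_unlock();                  /* printk_cpu_unlock_irqrestore(flags) */
        local_irq_restore(flags);
}

On !SMP both macros compile to ((void)flags), which is why the old #else copy of dump_stack() could be dropped as well.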