/*
* Module-based torture test facility for locking
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright (C) IBM Corporation, 2014
*
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
* Davidlohr Bueso <dave@stgolabs.net>
* Based on kernel/rcu/torture.c.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
torture_param(int, nwriters_stress, -1,
"Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
"Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
"Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
"Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
"Enable verbose debugging printk()s");
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
"Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static bool lock_is_write_held;
static bool lock_is_read_held;
struct lock_stress_stats {
long n_lock_fail;
long n_lock_acquired;
};
/* Forward reference. */
static void lock_torture_cleanup(void);
/*
* Operations vector for selecting different types of tests.
*/
struct lock_torture_ops {
void (*init)(void);
int (*writelock)(void);
void (*write_delay)(struct torture_random_state *trsp);
void (*task_boost)(struct torture_random_state *trsp);
void (*writeunlock)(void);
int (*readlock)(void);
void (*read_delay)(struct torture_random_state *trsp);
void (*readunlock)(void);
unsigned long flags; /* for irq spinlocks */
const char *name;
};
struct lock_torture_cxt {
int nrealwriters_stress;
int nrealreaders_stress;
bool debug_lock;
atomic_t n_lock_torture_errors;
struct lock_torture_ops *cur_ops;
struct lock_stress_stats *lwsa; /* writer statistics */
struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
ATOMIC_INIT(0),
NULL, NULL};
/*
* Definitions for lock torture testing.
*/
static int torture_lock_busted_write_lock(void)
{
return 0; /* BUGGY, do not use in real life!!! */
}
static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = 100;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_lock_busted_write_unlock(void)
{
/* BUGGY, do not use in real life!!! */
}
static void torture_boost_dummy(struct torture_random_state *trsp)
{
/* Only rtmutexes care about priority */
}
static struct lock_torture_ops lock_busted_ops = {
.writelock = torture_lock_busted_write_lock,
.write_delay = torture_lock_busted_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_lock_busted_write_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "lock_busted"
};
static DEFINE_SPINLOCK(torture_spinlock);
static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
spin_lock(&torture_spinlock);
return 0;
}
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
spin_unlock(&torture_spinlock);
}
static struct lock_torture_ops spin_lock_ops = {
.writelock = torture_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_spin_lock_write_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "spin_lock"
};
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
unsigned long flags;
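/*
 * Stashing flags in the shared ops structure is safe here: the field
 * is written only while torture_spinlock is held and is consumed by
 * the same lock holder just before release, so concurrent writers
 * cannot clobber each other's saved IRQ state.
 */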
spin_lock_irqsave(&torture_spinlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
.writelock = torture_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_lock_spin_write_unlock_irq,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);
static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
write_lock(&torture_rwlock);
return 0;
}
static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
else
udelay(shortdelay_us);
}
static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
write_unlock(&torture_rwlock);
}
static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
read_lock(&torture_rwlock);
return 0;
}
static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 10;
const unsigned long longdelay_ms = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
else
udelay(shortdelay_us);
}
static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
read_unlock(&torture_rwlock);
}
static struct lock_torture_ops rw_lock_ops = {
.writelock = torture_rwlock_write_lock,
.write_delay = torture_rwlock_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_rwlock_write_unlock,
.readlock = torture_rwlock_read_lock,
.read_delay = torture_rwlock_read_delay,
.readunlock = torture_rwlock_read_unlock,
.name = "rw_lock"
};
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
unsigned long flags;
write_lock_irqsave(&torture_rwlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
unsigned long flags;
read_lock_irqsave(&torture_rwlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops rw_lock_irq_ops = {
.writelock = torture_rwlock_write_lock_irq,
.write_delay = torture_rwlock_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_rwlock_write_unlock_irq,
.readlock = torture_rwlock_read_lock_irq,
.read_delay = torture_rwlock_read_delay,
.readunlock = torture_rwlock_read_unlock_irq,
.name = "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);
static int torture_mutex_lock(void) __acquires(torture_mutex)
{
mutex_lock(&torture_mutex);
return 0;
}
static void torture_mutex_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = 100;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 5);
else
mdelay(longdelay_ms / 5);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_mutex_unlock(void) __releases(torture_mutex)
{
mutex_unlock(&torture_mutex);
}
static struct lock_torture_ops mutex_lock_ops = {
.writelock = torture_mutex_lock,
.write_delay = torture_mutex_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_mutex_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "mutex_lock"
};
#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
LIST_HEAD(list);
struct reorder_lock {
struct list_head link;
struct ww_mutex *lock;
} locks[3], *ll, *ln;
struct ww_acquire_ctx ctx;
locks[0].lock = &torture_ww_mutex_0;
list_add(&locks[0].link, &list);
locks[1].lock = &torture_ww_mutex_1;
list_add(&locks[1].link, &list);
locks[2].lock = &torture_ww_mutex_2;
list_add(&locks[2].link, &list);
ww_acquire_init(&ctx, &torture_ww_class);
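/*
 * Acquire the locks in list order.  On -EDEADLK, release everything
 * taken so far, sleep-wait for the contended lock with
 * ww_mutex_lock_slow(), and move it to the front of the list so that
 * the remaining iterations reacquire the others: the classic
 * wait/wound backoff-and-retry pattern.
 */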
list_for_each_entry(ll, &list, link) {
int err;
err = ww_mutex_lock(ll->lock, &ctx);
if (!err)
continue;
ln = ll;
list_for_each_entry_continue_reverse(ln, &list, link)
ww_mutex_unlock(ln->lock);
if (err != -EDEADLK)
return err;
ww_mutex_lock_slow(ll->lock, &ctx);
list_move(&ll->link, &list);
}
ww_acquire_fini(&ctx);
return 0;
}
static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
ww_mutex_unlock(&torture_ww_mutex_0);
ww_mutex_unlock(&torture_ww_mutex_1);
ww_mutex_unlock(&torture_ww_mutex_2);
}
static struct lock_torture_ops ww_mutex_lock_ops = {
.writelock = torture_ww_mutex_lock,
.write_delay = torture_mutex_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_ww_mutex_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "ww_mutex_lock"
};
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
rt_mutex_lock(&torture_rtmutex);
return 0;
}
static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
int policy;
struct sched_param param;
const unsigned int factor = 50000; /* yes, quite arbitrary */
if (!rt_task(current)) {
/*
* Boost priority once every ~50k operations. When the
* task tries to take the lock, the rtmutex will account
* for the new priority, and do any corresponding pi-dance.
*/
if (trsp && !(torture_random(trsp) %
(cxt.nrealwriters_stress * factor))) {
policy = SCHED_FIFO;
param.sched_priority = MAX_RT_PRIO - 1;
} else /* common case, do nothing */
return;
} else {
/*
* The task will remain boosted for another ~500k operations,
* then restored back to its original prio, and so forth.
*
* When @trsp is nil, we want to force-reset the task for
* stopping the kthread.
*/
if (!trsp || !(torture_random(trsp) %
(cxt.nrealwriters_stress * factor * 2))) {
policy = SCHED_NORMAL;
param.sched_priority = 0;
} else /* common case, do nothing */
return;
}
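/*
 * The _nocheck variant skips the capability and rlimit permission
 * checks, which are pointless for a kthread.
 */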
sched_setscheduler_nocheck(current, policy, &param);
}
static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = 100;
/*
* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
rt_mutex_unlock(&torture_rtmutex);
}
static struct lock_torture_ops rtmutex_lock_ops = {
.writelock = torture_rtmutex_lock,
.write_delay = torture_rtmutex_delay,
.task_boost = torture_rtmutex_boost,
.writeunlock = torture_rtmutex_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "rtmutex_lock"
};
#endif
static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
down_write(&torture_rwsem);
return 0;
}
static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = 100;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 10);
else
mdelay(longdelay_ms / 10);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
up_write(&torture_rwsem);
}
static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
down_read(&torture_rwsem);
return 0;
}
static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = 100;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 2);
else
mdelay(longdelay_ms / 2);
if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
up_read(&torture_rwsem);
}
static struct lock_torture_ops rwsem_lock_ops = {
.writelock = torture_rwsem_down_write,
.write_delay = torture_rwsem_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_rwsem_up_write,
.readlock = torture_rwsem_down_read,
.read_delay = torture_rwsem_read_delay,
.readunlock = torture_rwsem_up_read,
.name = "rwsem_lock"
};
static struct percpu_rw_semaphore pcpu_rwsem;
static void torture_percpu_rwsem_init(void)
{
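/* percpu_init_rwsem() fails only on per-CPU allocation failure (-ENOMEM). */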
BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}
static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
percpu_down_write(&pcpu_rwsem);
return 0;
}
static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
percpu_up_write(&pcpu_rwsem);
}
static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
percpu_down_read(&pcpu_rwsem);
return 0;
}
static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
percpu_up_read(&pcpu_rwsem);
}
static struct lock_torture_ops percpu_rwsem_lock_ops = {
.init = torture_percpu_rwsem_init,
.writelock = torture_percpu_rwsem_down_write,
.write_delay = torture_rwsem_write_delay,
.task_boost = torture_boost_dummy,
.writeunlock = torture_percpu_rwsem_up_write,
.readlock = torture_percpu_rwsem_down_read,
.read_delay = torture_rwsem_read_delay,
.readunlock = torture_percpu_rwsem_up_read,
.name = "percpu_rwsem_lock"
};
/*
* Lock torture writer kthread. Repeatedly acquires and releases
* the lock, checking for duplicate acquisitions.
*/
static int lock_torture_writer(void *arg)
{
struct lock_stress_stats *lwsp = arg;
static DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
set_user_nice(current, MAX_NICE);
do {
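/* About one time in 2^20, sleep for a jiffy to perturb the timing. */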
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
cxt.cur_ops->task_boost(&rand);
cxt.cur_ops->writelock();
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_lock_fail++;
lock_is_write_held = 1;
if (WARN_ON_ONCE(lock_is_read_held))
lwsp->n_lock_fail++; /* rare, but... */
lwsp->n_lock_acquired++;
cxt.cur_ops->write_delay(&rand);
lock_is_write_held = 0;
cxt.cur_ops->writeunlock();
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
cxt.cur_ops->task_boost(NULL); /* reset prio */
torture_kthread_stopping("lock_torture_writer");
return 0;
}
/*
* Lock torture reader kthread. Repeatedly acquires and releases
* the reader lock.
*/
static int lock_torture_reader(void *arg)
{
struct lock_stress_stats *lrsp = arg;
static DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("lock_torture_reader task started");
set_user_nice(current, MAX_NICE);
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
cxt.cur_ops->readlock();
lock_is_read_held = 1;
if (WARN_ON_ONCE(lock_is_write_held))
lrsp->n_lock_fail++; /* rare, but... */
lrsp->n_lock_acquired++;
cxt.cur_ops->read_delay(&rand);
lock_is_read_held = 0;
cxt.cur_ops->readunlock();
stutter_wait("lock_torture_reader");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_reader");
return 0;
}
/*
* Create a lock-torture-statistics message in the specified buffer.
*/
static void __torture_print_stats(char *page,
struct lock_stress_stats *statp, bool write)
{
bool fail = false;
int i, n_stress;
long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
long long sum = 0;
n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
for (i = 0; i < n_stress; i++) {
if (statp[i].n_lock_fail)
fail = true;
sum += statp[i].n_lock_acquired;
if (max < statp[i].n_lock_acquired)
max = statp[i].n_lock_acquired;
if (min > statp[i].n_lock_acquired)
min = statp[i].n_lock_acquired;
}
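/*
 * The "???" below flags suspicious imbalance: the luckiest task
 * acquired the lock more than twice as often as the unluckiest.
 */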
page += sprintf(page,
"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
write ? "Writes" : "Reads ",
sum, max, min, max / 2 > min ? "???" : "",
fail, fail ? "!!!" : "");
if (fail)
atomic_inc(&cxt.n_lock_torture_errors);
}
/*
* Print torture statistics. Caller must ensure that there is only one
* call to this function at a given time!!! This is normally accomplished
* by relying on the module system to only have one copy of the module
* loaded, and then by giving the lock_torture_stats kthread full control
* (or the init/cleanup functions when lock_torture_stats thread is not
* running).
*/
static void lock_torture_stats_print(void)
{
int size = cxt.nrealwriters_stress * 200 + 8192;
char *buf;
if (cxt.cur_ops->readlock)
size += cxt.nrealreaders_stress * 200 + 8192;
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
__torture_print_stats(buf, cxt.lwsa, true);
pr_alert("%s", buf);
kfree(buf);
if (cxt.cur_ops->readlock) {
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
__torture_print_stats(buf, cxt.lrsa, false);
pr_alert("%s", buf);
kfree(buf);
}
}
/*
* Periodically prints torture statistics, if periodic statistics printing
* was specified via the stat_interval module parameter.
*
* No need to worry about fullstop here, since this one doesn't reference
* volatile state or register callbacks.
*/
static int lock_torture_stats(void *arg)
{
VERBOSE_TOROUT_STRING("lock_torture_stats task started");
do {
schedule_timeout_interruptible(stat_interval * HZ);
lock_torture_stats_print();
torture_shutdown_absorb("lock_torture_stats");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_stats");
return 0;
}
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
"--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, cxt.debug_lock ? " [debug]": "",
cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
verbose, shuffle_interval, stutter, shutdown_secs,
onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
{
int i;
if (torture_cleanup_begin())
return;
/*
* Indicates early cleanup, meaning that the test has not run,
* such as when passing bogus args when loading the module. As
* such, only perform the underlying torture-specific cleanups,
* and avoid anything related to locktorture.
*/
if (!cxt.lwsa && !cxt.lrsa)
goto end;
if (writer_tasks) {
for (i = 0; i < cxt.nrealwriters_stress; i++)
torture_stop_kthread(lock_torture_writer,
writer_tasks[i]);
kfree(writer_tasks);
writer_tasks = NULL;
}
if (reader_tasks) {
for (i = 0; i < cxt.nrealreaders_stress; i++)
torture_stop_kthread(lock_torture_reader,
reader_tasks[i]);
kfree(reader_tasks);
reader_tasks = NULL;
}
torture_stop_kthread(lock_torture_stats, stats_task);
lock_torture_stats_print(); /* -After- the stats thread is stopped! */
if (atomic_read(&cxt.n_lock_torture_errors))
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: FAILURE");
else if (torture_onoff_failures())
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: LOCK_HOTPLUG");
else
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
kfree(cxt.lwsa);
kfree(cxt.lrsa);
end:
torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
int i, j;
int firsterr = 0;
static struct lock_torture_ops *torture_ops[] = {
&lock_busted_ops,
&spin_lock_ops, &spin_lock_irq_ops,
&rw_lock_ops, &rw_lock_irq_ops,
&mutex_lock_ops,
&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
&rtmutex_lock_ops,
#endif
&rwsem_lock_ops,
&percpu_rwsem_lock_ops,
};
if (!torture_init_begin(torture_type, verbose))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cxt.cur_ops = torture_ops[i];
if (strcmp(torture_type, cxt.cur_ops->name) == 0)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
pr_alert("lock-torture: invalid torture type: \"%s\"\n",
torture_type);
pr_alert("lock-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
pr_alert(" %s", torture_ops[i]->name);
pr_alert("\n");
firsterr = -EINVAL;
goto unwind;
}
if (nwriters_stress == 0 && nreaders_stress == 0) {
pr_alert("lock-torture: must run at least one locking thread\n");
firsterr = -EINVAL;
goto unwind;
}
if (cxt.cur_ops->init)
cxt.cur_ops->init();
if (nwriters_stress >= 0)
cxt.nrealwriters_stress = nwriters_stress;
else
cxt.nrealwriters_stress = 2 * num_online_cpus();
#ifdef CONFIG_DEBUG_MUTEXES
if (strncmp(torture_type, "mutex", 5) == 0)
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
if (strncmp(torture_type, "rtmutex", 7) == 0)
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
if ((strncmp(torture_type, "spin", 4) == 0) ||
(strncmp(torture_type, "rw_lock", 7) == 0))
cxt.debug_lock = true;
#endif
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = 0;
cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < cxt.nrealwriters_stress; i++) {
cxt.lwsa[i].n_lock_fail = 0;
cxt.lwsa[i].n_lock_acquired = 0;
}
}
if (cxt.cur_ops->readlock) {
if (nreaders_stress >= 0)
cxt.nrealreaders_stress = nreaders_stress;
else {
/*
* By default distribute evenly the number of
* readers and writers. We still run the same number
* of threads as the writer-only locks default.
*/
if (nwriters_stress < 0) /* user doesn't care */
cxt.nrealwriters_stress = num_online_cpus();
cxt.nrealreaders_stress = cxt.nrealwriters_stress;
}
if (nreaders_stress) {
lock_is_read_held = 0;
cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
if (cxt.lrsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
firsterr = -ENOMEM;
kfree(cxt.lwsa);
cxt.lwsa = NULL;
goto unwind;
}
for (i = 0; i < cxt.nrealreaders_stress; i++) {
cxt.lrsa[i].n_lock_fail = 0;
cxt.lrsa[i].n_lock_acquired = 0;
}
}
}
lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
/* Prepare torture context. */
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
onoff_interval * HZ);
if (firsterr)
goto unwind;
}
if (shuffle_interval > 0) {
firsterr = torture_shuffle_init(shuffle_interval);
if (firsterr)
goto unwind;
}
if (shutdown_secs > 0) {
firsterr = torture_shutdown_init(shutdown_secs,
lock_torture_cleanup);
if (firsterr)
goto unwind;
}
if (stutter > 0) {
firsterr = torture_stutter_init(stutter);
if (firsterr)
goto unwind;
}
if (nwriters_stress) {
writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
}
if (cxt.cur_ops->readlock) {
reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
kfree(writer_tasks);
writer_tasks = NULL;
firsterr = -ENOMEM;
goto unwind;
}
}
/*
* Create the kthreads and start torturing (oh, those poor little locks).
*
* TODO: Note that we interleave writers with readers, giving writers a
* slight advantage, by creating their kthreads first. This can be modified
* for very specific needs, or the policy could even be made user-selectable,
* if ever wanted.
*/
for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
j < cxt.nrealreaders_stress; i++, j++) {
if (i >= cxt.nrealwriters_stress)
goto create_reader;
/* Create writer. */
firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
writer_tasks[i]);
if (firsterr)
goto unwind;
create_reader:
if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
continue;
/* Create reader. */
firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
reader_tasks[j]);
if (firsterr)
goto unwind;
}
if (stat_interval > 0) {
firsterr = torture_create_kthread(lock_torture_stats, NULL,
stats_task);
if (firsterr)
goto unwind;
}
torture_init_end();
return 0;
unwind:
torture_init_end();
lock_torture_cleanup();
return firsterr;
}
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);