commit 648c630c64

Merge branches 'doc.2015.12.05a', 'exp.2015.12.07a', 'fixes.2015.12.07a', 'list.2015.12.04b' and 'torture.2015.12.05a' into HEAD

doc.2015.12.05a:     Documentation updates
exp.2015.12.07a:     Expedited grace-period updates
fixes.2015.12.07a:   Miscellaneous fixes
list.2015.12.04b:    Linked-list updates
torture.2015.12.05a: Torture-test updates
@@ -3296,18 +3296,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	rcutorture.verbose= [KNL]
 			Enable additional printk() statements.
 
+	rcupdate.rcu_cpu_stall_suppress= [KNL]
+			Suppress RCU CPU stall warning messages.
+
+	rcupdate.rcu_cpu_stall_timeout= [KNL]
+			Set timeout for RCU CPU stall warning messages.
+
 	rcupdate.rcu_expedited= [KNL]
 			Use expedited grace-period primitives, for
 			example, synchronize_rcu_expedited() instead
 			of synchronize_rcu().  This reduces latency,
 			but can increase CPU utilization, degrade
 			real-time latency, and degrade energy efficiency.
+			No effect on CONFIG_TINY_RCU kernels.
 
-	rcupdate.rcu_cpu_stall_suppress= [KNL]
-			Suppress RCU CPU stall warning messages.
+	rcupdate.rcu_normal= [KNL]
+			Use only normal grace-period primitives,
+			for example, synchronize_rcu() instead of
+			synchronize_rcu_expedited().  This improves
+			real-time latency, CPU utilization, and
+			energy efficiency, but can expose users to
+			increased grace-period latency.  This parameter
+			overrides rcupdate.rcu_expedited.  No effect on
+			CONFIG_TINY_RCU kernels.
 
-	rcupdate.rcu_cpu_stall_timeout= [KNL]
-			Set timeout for RCU CPU stall warning messages.
+	rcupdate.rcu_normal_after_boot= [KNL]
+			Once boot has completed (that is, after
+			rcu_end_inkernel_boot() has been invoked), use
+			only normal grace-period primitives.  No effect
+			on CONFIG_TINY_RCU kernels.
 
 	rcupdate.rcu_task_stall_timeout= [KNL]
 			Set timeout in jiffies for RCU task stall warning
@@ -24,7 +24,7 @@
 static inline void INIT_LIST_HEAD(struct list_head *list)
 {
-	list->next = list;
+	WRITE_ONCE(list->next, list);
 	list->prev = list;
 }
 
@@ -42,7 +42,7 @@ static inline void __list_add(struct list_head *new,
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
-	prev->next = new;
+	WRITE_ONCE(prev->next, new);
 }
 #else
 extern void __list_add(struct list_head *new,
@@ -186,7 +186,7 @@ static inline int list_is_last(const struct list_head *list,
  */
 static inline int list_empty(const struct list_head *head)
 {
-	return head->next == head;
+	return READ_ONCE(head->next) == head;
 }
 
 /**
@@ -608,7 +608,7 @@ static inline int hlist_unhashed(const struct hlist_node *h)
 
 static inline int hlist_empty(const struct hlist_head *h)
 {
-	return !h->first;
+	return !READ_ONCE(h->first);
 }
 
 static inline void __hlist_del(struct hlist_node *n)
@@ -642,7 +642,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 	n->next = first;
 	if (first)
 		first->pprev = &n->next;
-	h->first = n;
+	WRITE_ONCE(h->first, n);
 	n->pprev = &h->first;
 }
 
@@ -653,14 +653,14 @@ static inline void hlist_add_before(struct hlist_node *n,
 	n->pprev = next->pprev;
 	n->next = next;
 	next->pprev = &n->next;
-	*(n->pprev) = n;
+	WRITE_ONCE(*(n->pprev), n);
 }
 
 static inline void hlist_add_behind(struct hlist_node *n,
 				    struct hlist_node *prev)
 {
 	n->next = prev->next;
-	prev->next = n;
+	WRITE_ONCE(prev->next, n);
 	n->pprev = &prev->next;
 
 	if (n->next)
@@ -70,7 +70,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
 
 static inline int hlist_bl_empty(const struct hlist_bl_head *h)
 {
-	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+	return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
 }
 
 static inline void hlist_bl_add_head(struct hlist_bl_node *n,
@@ -57,7 +57,7 @@ static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
 
 static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
 {
-	return is_a_nulls(h->first);
+	return is_a_nulls(READ_ONCE(h->first));
 }
 
 static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
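A minimal sketch (illustrative, not from this series; mylist, mylist_lock, and struct item are hypothetical) of the reader/writer pairing these READ_ONCE()/WRITE_ONCE() conversions support — updaters still serialize among themselves, but an observer may now check emptiness without the lock:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head node;
		int payload;
	};

	static LIST_HEAD(mylist);		/* hypothetical list */
	static DEFINE_SPINLOCK(mylist_lock);	/* serializes updaters only */

	/* Updater: list_add() now publishes via WRITE_ONCE(), so the
	 * store to ->next cannot be torn or fused by the compiler. */
	static void add_item(struct item *ip)
	{
		spin_lock(&mylist_lock);
		list_add(&ip->node, &mylist);
		spin_unlock(&mylist_lock);
	}

	/* Lockless observer: list_empty() now uses READ_ONCE(), so the
	 * load of ->next is a single, untorn access. */
	static bool anything_queued(void)
	{
		return !list_empty(&mylist);
	}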
@@ -179,32 +179,31 @@ static inline void list_replace_rcu(struct list_head *old,
 }
 
 /**
- * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+ * __list_splice_init_rcu - join an RCU-protected list into an existing list.
  * @list:	the RCU-protected list to splice
- * @head:	the place in the list to splice the first list into
+ * @prev:	points to the last element of the existing list
+ * @next:	points to the first element of the existing list
  * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
  *
- * @head can be RCU-read traversed concurrently with this function.
+ * The list pointed to by @prev and @next can be RCU-read traversed
+ * concurrently with this function.
  *
 * Note that this function blocks.
 *
- * Important note: the caller must take whatever action is necessary to
- * prevent any other updates to @head.  In principle, it is possible
- * to modify the list as soon as sync() begins execution.
- * If this sort of thing becomes necessary, an alternative version
- * based on call_rcu() could be created.  But only if -really-
- * needed -- there is no shortage of RCU API members.
+ * Important note: the caller must take whatever action is necessary to prevent
+ * any other updates to the existing list.  In principle, it is possible to
+ * modify the list as soon as sync() begins execution.  If this sort of thing
+ * becomes necessary, an alternative version based on call_rcu() could be
+ * created.  But only if -really- needed -- there is no shortage of RCU API
+ * members.
 */
-static inline void list_splice_init_rcu(struct list_head *list,
-					struct list_head *head,
-					void (*sync)(void))
+static inline void __list_splice_init_rcu(struct list_head *list,
+					  struct list_head *prev,
+					  struct list_head *next,
+					  void (*sync)(void))
 {
 	struct list_head *first = list->next;
 	struct list_head *last = list->prev;
-	struct list_head *at = head->next;
-
-	if (list_empty(list))
-		return;
 
 	/*
 	 * "first" and "last" tracking list, so initialize it.  RCU readers
@@ -231,10 +230,40 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	 * this function.
 	 */
 
-	last->next = at;
-	rcu_assign_pointer(list_next_rcu(head), first);
-	first->prev = head;
-	at->prev = last;
+	last->next = next;
+	rcu_assign_pointer(list_next_rcu(prev), first);
+	first->prev = prev;
+	next->prev = last;
+}
+
+/**
+ * list_splice_init_rcu - splice an RCU-protected list into an existing list,
+ *			  designed for stacks.
+ * @list:	the RCU-protected list to splice
+ * @head:	the place in the existing list to splice the first list into
+ * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
+ */
+static inline void list_splice_init_rcu(struct list_head *list,
+					struct list_head *head,
+					void (*sync)(void))
+{
+	if (!list_empty(list))
+		__list_splice_init_rcu(list, head, head->next, sync);
+}
+
+/**
+ * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
+ *			       list, designed for queues.
+ * @list:	the RCU-protected list to splice
+ * @head:	the place in the existing list to splice the first list into
+ * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
+ */
+static inline void list_splice_tail_init_rcu(struct list_head *list,
+					     struct list_head *head,
+					     void (*sync)(void))
+{
+	if (!list_empty(list))
+		__list_splice_init_rcu(list, head->prev, head, sync);
 }
 
 /**
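A hedged usage sketch of the resulting pair of wrappers (the list names are hypothetical): list_splice_init_rcu() splices at the head (stack order), while the new list_splice_tail_init_rcu() splices at the tail (queue order); both may block in sync():

	static LIST_HEAD(donor);	/* hypothetical: RCU readers may be walking it */
	static LIST_HEAD(recipient);	/* hypothetical: remains RCU-readable throughout */

	/* Move all entries from 'donor' to the tail of 'recipient',
	 * preserving order.  Blocks in synchronize_rcu() so that any
	 * readers still traversing 'donor' finish before the splice. */
	static void migrate_entries(void)
	{
		list_splice_tail_init_rcu(&donor, &recipient, synchronize_rcu);
	}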
@@ -304,6 +333,42 @@ static inline void list_splice_init_rcu(struct list_head *list,
 		&pos->member != (head); \
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
+/**
+ * list_entry_lockless - get the struct for this entry
+ * @ptr:	the &struct list_head pointer.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_head within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu(), but requires some implicit RCU
+ * read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where
+ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
+ * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
+ * example is when items are added to the list, but never deleted.
+ */
+#define list_entry_lockless(ptr, type, member) \
+	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+
+/**
+ * list_for_each_entry_lockless - iterate over rcu list of given type
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu(), but requires some implicit RCU
+ * read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where
+ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
+ * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
+ * example is when items are added to the list, but never deleted.
+ */
+#define list_for_each_entry_lockless(pos, head, member) \
+	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
+	     &pos->member != (head); \
+	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
+
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos:	the type * to use as a loop cursor.
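An illustrative use of the new macro (all names hypothetical) on an add-only list of the kind the comment describes — entries are published with list_add_tail_rcu() and never deleted, so the traversal needs no rcu_read_lock():

	struct event {
		struct list_head list;
		int code;
	};

	static LIST_HEAD(event_log);	/* hypothetical: appended to, never pruned */

	static int count_events(int code)
	{
		struct event *e;
		int n = 0;

		/* Safe without rcu_read_lock(): entries are never freed,
		 * and list_entry_lockless() uses lockless_dereference()
		 * for the dependency-ordered load of each ->next. */
		list_for_each_entry_lockless(e, &event_log, list)
			if (e->code == code)
				n++;
		return n;
	}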
@@ -48,10 +48,17 @@
 
 #include <asm/barrier.h>
 
+#ifndef CONFIG_TINY_RCU
 extern int rcu_expedited; /* for sysctl */
+extern int rcu_normal;    /* also for sysctl */
+#endif /* #ifndef CONFIG_TINY_RCU */
 
 #ifdef CONFIG_TINY_RCU
 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
+{
+	return true;
+}
 static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
 {
 	return false;
@@ -65,6 +72,7 @@ static inline void rcu_unexpedite_gp(void)
 {
 }
 #else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
 void rcu_expedite_gp(void);
 void rcu_unexpedite_gp(void);
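The intended decision logic, sketched for illustration (the wrapper name is hypothetical; the check mirrors the srcu.c change later in this merge): rcu_normal wins over rcu_expedited, so internal users consult both:

	/* Sketch: how a grace-period primitive consults the two knobs. */
	static void my_synchronize(void)
	{
		if (rcu_gp_is_expedited() && !rcu_gp_is_normal())
			synchronize_rcu_expedited();
		else
			synchronize_rcu();
	}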
@@ -321,7 +329,6 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_end_inkernel_boot(void);
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
@@ -329,6 +336,12 @@ struct notifier_block;
 int rcu_cpu_notify(struct notifier_block *self,
 		   unsigned long action, void *hcpu);
 
+#ifndef CONFIG_TINY_RCU
+void rcu_end_inkernel_boot(void);
+#else /* #ifndef CONFIG_TINY_RCU */
+static inline void rcu_end_inkernel_boot(void) { }
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
@@ -379,9 +392,9 @@ static inline void rcu_init_nohz(void)
 */
 #define RCU_NONIDLE(a) \
 	do { \
-		rcu_irq_enter(); \
+		rcu_irq_enter_irqson(); \
 		do { a; } while (0); \
-		rcu_irq_exit(); \
+		rcu_irq_exit_irqson(); \
 	} while (0)
 
 /*
@@ -741,7 +754,7 @@ static inline void rcu_preempt_sleep_check(void)
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks causes tracing to lock up the system.
 *
- * The tracing version of rcu_dereference_raw() must not call
+ * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
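An illustrative use of RCU_NONIDLE() (the tracepoint name is hypothetical): it brackets a statement executed from the idle loop, where RCU normally ignores the CPU; because the _irqson wrappers save and restore flags themselves, the macro keeps working regardless of the caller's interrupt state:

	/* Hedged sketch: report an event from idle so RCU notices it. */
	static void idle_report(int cpu)
	{
		RCU_NONIDLE(trace_my_idle_event(cpu));	/* hypothetical tracepoint */
	}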
@@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void)
 {
 }
 
+static inline void rcu_irq_exit_irqson(void)
+{
+}
+
+static inline void rcu_irq_enter_irqson(void)
+{
+}
+
 static inline void rcu_irq_exit(void)
 {
 }
@@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void);
 /*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
 */
 static inline void rcu_virt_note_context_switch(int cpu)
 {
@@ -97,6 +97,8 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_enter_irqson(void);
+void rcu_irq_exit_irqson(void);
 
 void exit_rcu(void);
@@ -171,8 +171,8 @@ extern void syscall_unregfunc(void);
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
 				TP_CONDITION(cond),			\
-				rcu_irq_enter(),			\
-				rcu_irq_exit());			\
+				rcu_irq_enter_irqson(),			\
+				rcu_irq_exit_irqson());			\
 	}
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
@@ -943,6 +943,8 @@ static int __ref kernel_init(void *unused)
 
 	flush_delayed_fput();
 
+	rcu_end_inkernel_boot();
+
 	if (ramdisk_execute_command) {
 		ret = run_init_process(ramdisk_execute_command);
 		if (!ret)
@@ -20,7 +20,7 @@
 #include <linux/capability.h>
 #include <linux/compiler.h>
 
-#include <linux/rcupdate.h>	/* rcu_expedited */
+#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */
 
 #define KERNEL_ATTR_RO(_name) \
 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
@@ -144,11 +144,12 @@ static ssize_t fscaps_show(struct kobject *kobj,
 }
 KERNEL_ATTR_RO(fscaps);
 
+#ifndef CONFIG_TINY_RCU
 int rcu_expedited;
 static ssize_t rcu_expedited_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", rcu_expedited);
+	return sprintf(buf, "%d\n", READ_ONCE(rcu_expedited));
 }
 static ssize_t rcu_expedited_store(struct kobject *kobj,
 				   struct kobj_attribute *attr,
@@ -161,6 +162,24 @@ static ssize_t rcu_expedited_store(struct kobject *kobj,
 }
 KERNEL_ATTR_RW(rcu_expedited);
 
+int rcu_normal;
+static ssize_t rcu_normal_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", READ_ONCE(rcu_normal));
+}
+static ssize_t rcu_normal_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t count)
+{
+	if (kstrtoint(buf, 0, &rcu_normal))
+		return -EINVAL;
+
+	return count;
+}
+KERNEL_ATTR_RW(rcu_normal);
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 /*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
@@ -202,7 +221,10 @@ static struct attribute * kernel_attrs[] = {
 	&kexec_crash_size_attr.attr,
 	&vmcoreinfo_attr.attr,
 #endif
+#ifndef CONFIG_TINY_RCU
 	&rcu_expedited_attr.attr,
+	&rcu_normal_attr.attr,
+#endif
 	NULL
 };
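An editorial aside: because KERNEL_ATTR_RW() wires these attributes into the kernel kobject, the new knob appears as /sys/kernel/rcu_normal next to the existing /sys/kernel/rcu_expedited, so the boot-time choice can be revised at runtime, for example (illustrative):

	echo 1 > /sys/kernel/rcu_normal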
@@ -162,6 +162,27 @@ static int rcu_torture_writer_state;
 #define RTWS_SYNC		7
 #define RTWS_STUTTER		8
 #define RTWS_STOPPING		9
+static const char * const rcu_torture_writer_state_names[] = {
+	"RTWS_FIXED_DELAY",
+	"RTWS_DELAY",
+	"RTWS_REPLACE",
+	"RTWS_DEF_FREE",
+	"RTWS_EXP_SYNC",
+	"RTWS_COND_GET",
+	"RTWS_COND_SYNC",
+	"RTWS_SYNC",
+	"RTWS_STUTTER",
+	"RTWS_STOPPING",
+};
+
+static const char *rcu_torture_writer_state_getname(void)
+{
+	unsigned int i = READ_ONCE(rcu_torture_writer_state);
+
+	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
+		return "???";
+	return rcu_torture_writer_state_names[i];
+}
 
 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
 #define RCUTORTURE_RUNNABLE_INIT 1
@@ -1307,7 +1328,8 @@ rcu_torture_stats_print(void)
 
 		rcutorture_get_gp_data(cur_ops->ttype,
 				       &flags, &gpnum, &completed);
-		pr_alert("??? Writer stall state %d g%lu c%lu f%#x\n",
+		pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x\n",
+			 rcu_torture_writer_state_getname(),
 			 rcu_torture_writer_state,
 			 gpnum, completed, flags);
 		show_rcu_gp_kthreads();
@@ -489,7 +489,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 */
 void synchronize_srcu(struct srcu_struct *sp)
 {
-	__synchronize_srcu(sp, rcu_gp_is_expedited()
+	__synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
 			       ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
 			       : SYNCHRONIZE_SRCU_TRYCOUNT);
 }
@@ -68,10 +68,6 @@ MODULE_ALIAS("rcutree");
 
 /* Data structures. */
 
-static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
-static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
-static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
-
 /*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
@@ -246,24 +242,17 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
 */
 void rcu_sched_qs(void)
 {
-	unsigned long flags;
-
-	if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
-		trace_rcu_grace_period(TPS("rcu_sched"),
-				       __this_cpu_read(rcu_sched_data.gpnum),
-				       TPS("cpuqs"));
-		__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
-		if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
-			return;
-		local_irq_save(flags);
-		if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
-			__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
-			rcu_report_exp_rdp(&rcu_sched_state,
-					   this_cpu_ptr(&rcu_sched_data),
-					   true);
-		}
-		local_irq_restore(flags);
-	}
+	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
+		return;
+	trace_rcu_grace_period(TPS("rcu_sched"),
+			       __this_cpu_read(rcu_sched_data.gpnum),
+			       TPS("cpuqs"));
+	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+		return;
+	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+	rcu_report_exp_rdp(&rcu_sched_state,
+			   this_cpu_ptr(&rcu_sched_data), true);
 }
 
 void rcu_bh_qs(void)
@@ -300,17 +289,16 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
 * We inform the RCU core by emulating a zero-duration dyntick-idle
 * period, which we in turn do by incrementing the ->dynticks counter
 * by two.
+ *
+ * The caller must have disabled interrupts.
 */
 static void rcu_momentary_dyntick_idle(void)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp;
 	int resched_mask;
 	struct rcu_state *rsp;
 
-	local_irq_save(flags);
-
 	/*
 	 * Yes, we can lose flag-setting operations.  This is OK, because
 	 * the flag will be set again after some delay.
@@ -340,13 +328,12 @@ static void rcu_momentary_dyntick_idle(void)
 		smp_mb__after_atomic(); /* Later stuff after QS. */
 		break;
 	}
-	local_irq_restore(flags);
 }
 
 /*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
- * The caller must have disabled preemption.
+ * The caller must have disabled interrupts.
 */
 void rcu_note_context_switch(void)
 {
@@ -376,9 +363,14 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 */
 void rcu_all_qs(void)
 {
+	unsigned long flags;
+
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
+		local_irq_restore(flags);
+	}
 	this_cpu_inc(rcu_qs_ctr);
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -605,25 +597,25 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
-static int
+static bool
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	int i;
 
 	if (rcu_gp_in_progress(rsp))
-		return 0;  /* No, a grace period is already in progress. */
+		return false;  /* No, a grace period is already in progress. */
 	if (rcu_future_needs_gp(rsp))
-		return 1;  /* Yes, a no-CBs CPU needs one. */
+		return true;  /* Yes, a no-CBs CPU needs one. */
 	if (!rdp->nxttail[RCU_NEXT_TAIL])
-		return 0;  /* No, this is a no-CBs (or offline) CPU. */
+		return false;  /* No, this is a no-CBs (or offline) CPU. */
 	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
-		return 1;  /* Yes, this CPU has newly registered callbacks. */
+		return true;  /* Yes, CPU has newly registered callbacks. */
 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
 		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
 		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
 				 rdp->nxtcompleted[i]))
-			return 1;  /* Yes, CBs for future grace period. */
-	return 0;  /* No grace period needed. */
+			return true;  /* Yes, CBs for future grace period. */
+	return false;  /* No grace period needed. */
 }
 
 /*
@@ -740,7 +732,7 @@ void rcu_user_enter(void)
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
- * sections can occur.
+ * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
@@ -753,11 +745,10 @@ void rcu_user_enter(void)
 */
 void rcu_irq_exit(void)
 {
-	unsigned long flags;
 	long long oldval;
 	struct rcu_dynticks *rdtp;
 
-	local_irq_save(flags);
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
@@ -768,6 +759,17 @@ void rcu_irq_exit(void)
 	else
 		rcu_eqs_enter_common(oldval, true);
 	rcu_sysidle_enter(1);
-	local_irq_restore(flags);
 }
+
+/*
+ * Wrapper for rcu_irq_exit() where interrupts are enabled.
+ */
+void rcu_irq_exit_irqson(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	rcu_irq_exit();
+	local_irq_restore(flags);
+}
@@ -865,7 +867,7 @@ void rcu_user_exit(void)
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
- * sections can occur.
+ * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
@@ -881,11 +883,10 @@ void rcu_user_exit(void)
 */
 void rcu_irq_enter(void)
 {
-	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	local_irq_save(flags);
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
@@ -896,6 +897,17 @@ void rcu_irq_enter(void)
 	else
 		rcu_eqs_exit_common(oldval, true);
 	rcu_sysidle_exit(1);
-	local_irq_restore(flags);
 }
+
+/*
+ * Wrapper for rcu_irq_enter() where interrupts are enabled.
+ */
+void rcu_irq_enter_irqson(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	rcu_irq_enter();
+	local_irq_restore(flags);
+}
@@ -1186,6 +1198,16 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
 
+/*
+ * Convert a ->gp_state value to a character string.
+ */
+static const char *gp_state_getname(short gs)
+{
+	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
+		return "???";
+	return gp_state_names[gs];
+}
+
 /*
 * Complain about starvation of grace-period kthread.
 */
@@ -1196,12 +1218,16 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
-	if (j - gpa > 2 * HZ)
-		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x s%d ->state=%#lx\n",
+	if (j - gpa > 2 * HZ) {
+		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
 		       rsp->name, j - gpa,
 		       rsp->gpnum, rsp->completed,
-		       rsp->gp_flags, rsp->gp_state,
-		       rsp->gp_kthread ? rsp->gp_kthread->state : 0);
+		       rsp->gp_flags,
+		       gp_state_getname(rsp->gp_state), rsp->gp_state,
+		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
+		if (rsp->gp_kthread)
+			sched_show_task(rsp->gp_kthread);
+	}
 }
 
 /*
@@ -1802,9 +1828,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
 }
 
 /*
- * Initialize a new grace period.  Return 0 if no grace period required.
+ * Initialize a new grace period.  Return false if no grace period required.
 */
-static int rcu_gp_init(struct rcu_state *rsp)
+static bool rcu_gp_init(struct rcu_state *rsp)
 {
 	unsigned long oldmask;
 	struct rcu_data *rdp;
@@ -1815,7 +1841,7 @@
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep.  */
 		raw_spin_unlock_irq(&rnp->lock);
-		return 0;
+		return false;
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
@@ -1825,7 +1851,7 @@
 	 * Not supposed to be able to happen.
 	 */
 	raw_spin_unlock_irq(&rnp->lock);
-	return 0;
+	return false;
 	}
 
 	/* Advance to a new grace period and initialize state. */
@@ -1917,7 +1943,7 @@
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
 
-	return 1;
+	return true;
 }
 
 /*
@@ -3347,7 +3373,6 @@ static unsigned long rcu_seq_snap(unsigned long *sp)
 {
 	unsigned long s;
 
-	smp_mb(); /* Caller's modifications seen first by other CPUs. */
 	s = (READ_ONCE(*sp) + 3) & ~0x1;
 	smp_mb(); /* Above access must not bleed into critical section. */
 	return s;
@@ -3374,6 +3399,7 @@ static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
 }
 static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
 {
+	smp_mb(); /* Caller's modifications seen first by other CPUs. */
 	return rcu_seq_snap(&rsp->expedited_sequence);
 }
 static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
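Why (s + 3) & ~0x1: the sequence counter's bottom bit means "grace period in flight", so the snapshot rounds up to the first even (idle) value reachable only after a full grace period that began no earlier than the snapshot. A hedged worked example, userspace-style:

	/* Sketch of the ticket arithmetic: even counter = idle,
	 * odd = grace period in progress. */
	unsigned long seq_snap(unsigned long s) { return (s + 3) & ~0x1UL; }
	int seq_done(unsigned long s, unsigned long snap) { return s >= snap; }

	/* e.g. s == 4 (idle):      snap = 6 -- one full GP (5 -> 6) must elapse.
	 *      s == 5 (GP active): snap = 8 -- the current GP (-> 6) may have
	 *      started too early, so wait for the next full one (7 -> 8). */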
@@ -3585,7 +3611,7 @@ static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
 */
 static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 {
-	struct rcu_data *rdp;
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
 	struct rcu_node *rnp0;
 	struct rcu_node *rnp1 = NULL;
 
@@ -3599,7 +3625,7 @@
 	if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
 		if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
 			if (sync_exp_work_done(rsp, rnp0, NULL,
-					       &rsp->expedited_workdone0, s))
+					       &rdp->expedited_workdone0, s))
 				return NULL;
 			return rnp0;
 		}
@@ -3613,14 +3639,13 @@
 	 * can be inexact, as it is just promoting locality and is not
 	 * strictly needed for correctness.
 	 */
-	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
-	if (sync_exp_work_done(rsp, NULL, NULL, &rsp->expedited_workdone1, s))
+	if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
 		return NULL;
 	mutex_lock(&rdp->exp_funnel_mutex);
 	rnp0 = rdp->mynode;
 	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
 		if (sync_exp_work_done(rsp, rnp1, rdp,
-				       &rsp->expedited_workdone2, s))
+				       &rdp->expedited_workdone2, s))
 			return NULL;
 		mutex_lock(&rnp0->exp_funnel_mutex);
 		if (rnp1)
@@ -3630,7 +3655,7 @@
 		rnp1 = rnp0;
 	}
 	if (sync_exp_work_done(rsp, rnp1, rdp,
-			       &rsp->expedited_workdone3, s))
+			       &rdp->expedited_workdone3, s))
 		return NULL;
 	return rnp1;
 }
@@ -3716,24 +3741,22 @@ retry_ipi:
 			ret = smp_call_function_single(cpu, func, rsp, 0);
 			if (!ret) {
 				mask_ofl_ipi &= ~mask;
-			} else {
-				/* Failed, raced with offline. */
-				raw_spin_lock_irqsave_rcu_node(rnp, flags);
-				if (cpu_online(cpu) &&
-				    (rnp->expmask & mask)) {
-					raw_spin_unlock_irqrestore(&rnp->lock,
-								   flags);
-					schedule_timeout_uninterruptible(1);
-					if (cpu_online(cpu) &&
-					    (rnp->expmask & mask))
-						goto retry_ipi;
-					raw_spin_lock_irqsave_rcu_node(rnp,
-								       flags);
-				}
-				if (!(rnp->expmask & mask))
-					mask_ofl_ipi &= ~mask;
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				continue;
 			}
+			/* Failed, raced with offline. */
+			raw_spin_lock_irqsave_rcu_node(rnp, flags);
+			if (cpu_online(cpu) &&
+			    (rnp->expmask & mask)) {
+				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				schedule_timeout_uninterruptible(1);
+				if (cpu_online(cpu) &&
+				    (rnp->expmask & mask))
+					goto retry_ipi;
+				raw_spin_lock_irqsave_rcu_node(rnp, flags);
+			}
+			if (!(rnp->expmask & mask))
+				mask_ofl_ipi &= ~mask;
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -3748,6 +3771,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	unsigned long jiffies_stall;
 	unsigned long jiffies_start;
 	unsigned long mask;
+	int ndetected;
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	int ret;
@@ -3760,7 +3784,7 @@
 				rsp->expedited_wq,
 				sync_rcu_preempt_exp_done(rnp_root),
 				jiffies_stall);
-		if (ret > 0)
+		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
 			return;
 		if (ret < 0) {
 			/* Hit a signal, disable CPU stall warnings. */
@@ -3770,14 +3794,16 @@
 		}
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 		       rsp->name);
+		ndetected = 0;
 		rcu_for_each_leaf_node(rsp, rnp) {
-			(void)rcu_print_task_exp_stall(rnp);
+			ndetected = rcu_print_task_exp_stall(rnp);
 			mask = 1;
 			for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
 				struct rcu_data *rdp;
 
 				if (!(rnp->expmask & mask))
 					continue;
+				ndetected++;
 				rdp = per_cpu_ptr(rsp->rda, cpu);
 				pr_cont(" %d-%c%c%c", cpu,
 					"O."[cpu_online(cpu)],
@@ -3786,8 +3812,23 @@
 			}
 			mask <<= 1;
 		}
-		pr_cont(" } %lu jiffies s: %lu\n",
-			jiffies - jiffies_start, rsp->expedited_sequence);
+		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
+			jiffies - jiffies_start, rsp->expedited_sequence,
+			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
+		if (!ndetected) {
+			pr_err("blocking rcu_node structures:");
+			rcu_for_each_node_breadth_first(rsp, rnp) {
+				if (rnp == rnp_root)
+					continue; /* printed unconditionally */
+				if (sync_rcu_preempt_exp_done(rnp))
+					continue;
+				pr_cont(" l=%u:%d-%d:%#lx/%c",
+					rnp->level, rnp->grplo, rnp->grphi,
+					rnp->expmask,
+					".T"[!!rnp->exp_tasks]);
+			}
+			pr_cont("\n");
+		}
 		rcu_for_each_leaf_node(rsp, rnp) {
 			mask = 1;
 			for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
@@ -3822,6 +3863,16 @@ void synchronize_sched_expedited(void)
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_sched_state;
 
+	/* If only one CPU, this is automatically a grace period. */
+	if (rcu_blocking_is_gp())
+		return;
+
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(call_rcu_sched);
+		return;
+	}
+
 	/* Take a snapshot of the sequence number. */
 	s = rcu_exp_gp_seq_snap(rsp);
 
@@ -4307,8 +4358,8 @@ static int __init rcu_spawn_gp_kthread(void)
 			sp.sched_priority = kthread_prio;
 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 		}
-		wake_up_process(t);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		wake_up_process(t);
 	}
 	rcu_spawn_nocb_kthreads();
 	rcu_spawn_boost_kthreads();
@@ -4359,12 +4410,14 @@ static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
 /*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
-static void __init rcu_init_one(struct rcu_state *rsp,
-				struct rcu_data __percpu *rda)
+static void __init rcu_init_one(struct rcu_state *rsp)
 {
 	static const char * const buf[] = RCU_NODE_NAME_INIT;
 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
 	static const char * const exp[] = RCU_EXP_NAME_INIT;
+	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
+	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
+	static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
 	static u8 fl_mask = 0x1;
 
 	int levelcnt[RCU_NUM_LVLS];	/* # nodes in each level. */
@@ -4550,8 +4603,8 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
-	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
-	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+	rcu_init_one(&rcu_bh_state);
+	rcu_init_one(&rcu_sched_state);
 	if (dump_tree)
 		rcu_dump_rcu_node_tree(&rcu_sched_state);
 	__rcu_init_preempt();
@@ -178,6 +178,8 @@ struct rcu_node {
 				/*  beginning of each expedited GP. */
 	unsigned long expmaskinitnext;
 				/* Online CPUs for next expedited GP. */
+				/*  Any CPU that has ever been online will */
+				/*  have its bit set. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
 				/*  Only one bit will be set in this mask. */
 	int	grplo;		/* lowest-numbered CPU or group here. */
@@ -384,6 +386,10 @@ struct rcu_data {
 	struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 	struct mutex exp_funnel_mutex;
+	atomic_long_t expedited_workdone0;	/* # done by others #0. */
+	atomic_long_t expedited_workdone1;	/* # done by others #1. */
+	atomic_long_t expedited_workdone2;	/* # done by others #2. */
+	atomic_long_t expedited_workdone3;	/* # done by others #3. */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -498,10 +504,6 @@ struct rcu_state {
 						/* End of fields guarded by barrier_mutex. */
 
 	unsigned long expedited_sequence;	/* Take a ticket. */
-	atomic_long_t expedited_workdone0;	/* # done by others #0. */
-	atomic_long_t expedited_workdone1;	/* # done by others #1. */
-	atomic_long_t expedited_workdone2;	/* # done by others #2. */
-	atomic_long_t expedited_workdone3;	/* # done by others #3. */
 	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
 	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
 	wait_queue_head_t expedited_wq;		/* Wait for check-ins. */
@@ -545,6 +547,18 @@ struct rcu_state {
 #define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
 #define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */
 
+#ifndef RCU_TREE_NONCORE
+static const char * const gp_state_names[] = {
+	"RCU_GP_IDLE",
+	"RCU_GP_WAIT_GPS",
+	"RCU_GP_DONE_GPS",
+	"RCU_GP_WAIT_FQS",
+	"RCU_GP_DOING_FQS",
+	"RCU_GP_CLEANUP",
+	"RCU_GP_CLEANED",
+};
+#endif /* #ifndef RCU_TREE_NONCORE */
+
 extern struct list_head rcu_struct_flavors;
 
 /* Sequence through rcu_state structures for each RCU flavor. */
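Moving the expedited_workdone counters from the single rcu_state structure into per-CPU rcu_data turns contended global atomics into CPU-local updates; the read side (see the show_rcuexp() change below) simply sums them. The general pattern, sketched with hypothetical names:

	#include <linux/percpu.h>
	#include <linux/atomic.h>

	static DEFINE_PER_CPU(atomic_long_t, my_stat);	/* hypothetical */

	static void my_stat_inc(void)			/* hot path, CPU-local */
	{
		atomic_long_inc(this_cpu_ptr(&my_stat));
	}

	static unsigned long my_stat_sum(void)		/* rare, crosses CPUs */
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += atomic_long_read(&per_cpu(my_stat, cpu));
		return sum;
	}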
@@ -63,8 +63,7 @@ static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 
 /*
 * Check the RCU kernel configuration parameters and print informative
- * messages about anything out of the ordinary.  If you like #ifdef, you
- * will love this function.
+ * messages about anything out of the ordinary.
 */
 static void __init rcu_bootup_announce_oddness(void)
 {
@@ -147,8 +146,8 @@ static void __init rcu_bootup_announce(void)
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
-static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
-				   unsigned long flags) __releases(rnp->lock)
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+	__releases(rnp->lock) /* But leaves rrupts disabled. */
 {
 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
@@ -236,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock);
+	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP.  This expedited
@@ -251,7 +250,6 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
 	} else {
 		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
 	}
-	local_irq_restore(flags);
 }
 
 /*
@@ -286,12 +284,11 @@ static void rcu_preempt_qs(void)
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
- * Caller must disable preemption.
+ * Caller must disable interrupts.
 */
 static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
-	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
@@ -301,7 +298,7 @@ static void rcu_preempt_note_context_switch(void)
 		/* Possibly blocking in an RCU read-side critical section. */
 		rdp = this_cpu_ptr(rcu_state_p->rda);
 		rnp = rdp->mynode;
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+		raw_spin_lock_rcu_node(rnp);
 		t->rcu_read_unlock_special.b.blocked = true;
 		t->rcu_blocked_node = rnp;
 
@@ -317,7 +314,7 @@ static void rcu_preempt_note_context_switch(void)
 				       (rnp->qsmask & rdp->grpmask)
 				       ? rnp->gpnum
 				       : rnp->gpnum + 1);
-		rcu_preempt_ctxt_queue(rnp, rdp, flags);
+		rcu_preempt_ctxt_queue(rnp, rdp);
 	} else if (t->rcu_read_lock_nesting < 0 &&
 		   t->rcu_read_unlock_special.s) {
 
@@ -449,19 +446,13 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 		/*
 		 * Remove this task from the list it blocked on.  The task
-		 * now remains queued on the rcu_node corresponding to
-		 * the CPU it first blocked on, so the first attempt to
-		 * acquire the task's rcu_node's ->lock will succeed.
-		 * Keep the loop and add a WARN_ON() out of sheer paranoia.
+		 * now remains queued on the rcu_node corresponding to the
+		 * CPU it first blocked on, so there is no longer any need
+		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
-		for (;;) {
-			rnp = t->rcu_blocked_node;
-			raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-			if (rnp == t->rcu_blocked_node)
-				break;
-			WARN_ON_ONCE(1);
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-		}
+		rnp = t->rcu_blocked_node;
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		empty_exp = sync_rcu_preempt_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -746,6 +737,12 @@ void synchronize_rcu_expedited(void)
 	struct rcu_state *rsp = rcu_state_p;
 	unsigned long s;
 
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(call_rcu);
+		return;
+	}
+
 	s = rcu_exp_gp_seq_snap(rsp);
 
 	rnp_unlock = exp_funnel_lock(rsp, s);
@@ -786,7 +783,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier);
 */
 static void __init __rcu_init_preempt(void)
 {
-	rcu_init_one(rcu_state_p, rcu_data_p);
+	rcu_init_one(rcu_state_p);
 }
 
 /*
@@ -1520,7 +1517,8 @@ static void rcu_prepare_for_idle(void)
 	struct rcu_state *rsp;
 	int tne;
 
-	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL))
+	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
+	    rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
 	/* Handle nohz enablement switches conservatively. */
@@ -1534,10 +1532,6 @@ static void rcu_prepare_for_idle(void)
 	if (!tne)
 		return;
 
-	/* If this is a no-CBs CPU, no callbacks, just return. */
-	if (rcu_is_nocb_cpu(smp_processor_id()))
-		return;
-
 	/*
 	 * If a non-lazy callback arrived at a CPU having only lazy
 	 * callbacks, invoke RCU core for the side-effect of recalculating
@@ -1,5 +1,5 @@
 /*
- * Read-Copy Update tracing for classic implementation
+ * Read-Copy Update tracing for hierarchical implementation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,7 @@
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
+ * Author: Paul E. McKenney
 *
 * Papers: http://www.rdrop.com/users/paulmck/RCU
 *
@@ -33,9 +34,7 @@
 #include <linux/sched.h>
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
 #include <linux/completion.h>
-#include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -183,14 +182,20 @@ static const struct file_operations rcudata_fops = {
 
 static int show_rcuexp(struct seq_file *m, void *v)
 {
+	int cpu;
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
+	struct rcu_data *rdp;
+	unsigned long s0 = 0, s1 = 0, s2 = 0, s3 = 0;
 
+	for_each_possible_cpu(cpu) {
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		s0 += atomic_long_read(&rdp->expedited_workdone0);
+		s1 += atomic_long_read(&rdp->expedited_workdone1);
+		s2 += atomic_long_read(&rdp->expedited_workdone2);
+		s3 += atomic_long_read(&rdp->expedited_workdone3);
+	}
 	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
-		   rsp->expedited_sequence,
-		   atomic_long_read(&rsp->expedited_workdone0),
-		   atomic_long_read(&rsp->expedited_workdone1),
-		   atomic_long_read(&rsp->expedited_workdone2),
-		   atomic_long_read(&rsp->expedited_workdone3),
+		   rsp->expedited_sequence, s0, s1, s2, s3,
 		   atomic_long_read(&rsp->expedited_normal),
 		   atomic_read(&rsp->expedited_need_qs),
 		   rsp->expedited_sequence / 2);
@@ -487,16 +492,4 @@ free_out:
 	debugfs_remove_recursive(rcudir);
 	return 1;
 }
-
-static void __exit rcutree_trace_cleanup(void)
-{
-	debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutree_trace_init);
-module_exit(rcutree_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutree_trace_init);
@@ -60,7 +60,12 @@ MODULE_ALIAS("rcupdate");
 #endif
 #define MODULE_PARAM_PREFIX "rcupdate."
 
+#ifndef CONFIG_TINY_RCU
 module_param(rcu_expedited, int, 0);
+module_param(rcu_normal, int, 0);
+static int rcu_normal_after_boot;
+module_param(rcu_normal_after_boot, int, 0);
+#endif /* #ifndef CONFIG_TINY_RCU */
 
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
 /**
@@ -113,6 +118,17 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
 
 #ifndef CONFIG_TINY_RCU
 
+/*
+ * Should expedited grace-period primitives always fall back to their
+ * non-expedited counterparts?  Intended for use within RCU.  Note
+ * that if the user specifies both rcu_expedited and rcu_normal, then
+ * rcu_normal wins.
+ */
+bool rcu_gp_is_normal(void)
+{
+	return READ_ONCE(rcu_normal);
+}
+
 static atomic_t rcu_expedited_nesting =
 	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
 
@@ -157,8 +173,6 @@ void rcu_unexpedite_gp(void)
 }
 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
 
-#endif /* #ifndef CONFIG_TINY_RCU */
-
 /*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
@@ -166,8 +180,12 @@ void rcu_end_inkernel_boot(void)
 {
 	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
 		rcu_unexpedite_gp();
+	if (rcu_normal_after_boot)
+		WRITE_ONCE(rcu_normal, 1);
 }
 
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 #ifdef CONFIG_PREEMPT_RCU
 
 /*
|
@ -3085,7 +3085,6 @@ static void __sched notrace __schedule(bool preempt)
|
||||
|
||||
cpu = smp_processor_id();
|
||||
rq = cpu_rq(cpu);
|
||||
rcu_note_context_switch();
|
||||
prev = rq->curr;
|
||||
|
||||
/*
|
||||
@ -3104,13 +3103,16 @@ static void __sched notrace __schedule(bool preempt)
|
||||
if (sched_feat(HRTICK))
|
||||
hrtick_clear(rq);
|
||||
|
||||
local_irq_disable();
|
||||
rcu_note_context_switch();
|
||||
|
||||
/*
|
||||
* Make sure that signal_pending_state()->signal_pending() below
|
||||
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
|
||||
* done by the caller to avoid the race with signal_wake_up().
|
||||
*/
|
||||
smp_mb__before_spinlock();
|
||||
raw_spin_lock_irq(&rq->lock);
|
||||
raw_spin_lock(&rq->lock);
|
||||
lockdep_pin_lock(&rq->lock);
|
||||
|
||||
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
|
||||
|
@ -37,7 +37,7 @@ void __list_add(struct list_head *new,
|
||||
next->prev = new;
|
||||
new->next = next;
|
||||
new->prev = prev;
|
||||
prev->next = new;
|
||||
WRITE_ONCE(prev->next, new);
|
||||
}
|
||||
EXPORT_SYMBOL(__list_add);
|
||||
|
||||
|
@@ -38,8 +38,6 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-grace=120
-
 T=/tmp/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
 touch $T
@@ -152,7 +150,7 @@ fi
 qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
 
 # Generate architecture-specific and interaction-specific qemu arguments
-qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$builddir/console.log"`"
+qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$resdir/console.log"`"
 
 # Generate qemu -append arguments
 qemu_append="`identify_qemu_append "$QEMU"`"
@@ -168,7 +166,7 @@
 	touch $resdir/buildonly
 	exit 0
 fi
-echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
+echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
 echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
@@ -214,7 +212,7 @@
 	else
 		break
 	fi
-	if test $kruntime -ge $((seconds + grace))
+	if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
 	then
 		echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
 		kill -KILL $qemu_pid
@@ -224,6 +222,5 @@
 	done
 fi
 
-cp $builddir/console.log $resdir
 parse-torture.sh $resdir/console.log $title
 parse-console.sh $resdir/console.log $title
@@ -42,6 +42,7 @@ TORTURE_DEFCONFIG=defconfig
 TORTURE_BOOT_IMAGE=""
 TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
 TORTURE_KMAKE_ARG=""
+TORTURE_SHUTDOWN_GRACE=180
 TORTURE_SUITE=rcu
 resdir=""
 configs=""
@@ -149,6 +150,11 @@ do
 		resdir=$2
 		shift
 		;;
+	--shutdown-grace)
+		checkarg --shutdown-grace "(seconds)" "$#" "$2" '^[0-9]*$' '^error'
+		TORTURE_SHUTDOWN_GRACE=$2
+		shift
+		;;
 	--torture)
 		checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
 		TORTURE_SUITE=$2
@@ -266,6 +272,7 @@ TORTURE_KMAKE_ARG="$TORTURE_KMAKE_ARG"; export TORTURE_KMAKE_ARG
 TORTURE_QEMU_CMD="$TORTURE_QEMU_CMD"; export TORTURE_QEMU_CMD
 TORTURE_QEMU_INTERACTIVE="$TORTURE_QEMU_INTERACTIVE"; export TORTURE_QEMU_INTERACTIVE
 TORTURE_QEMU_MAC="$TORTURE_QEMU_MAC"; export TORTURE_QEMU_MAC
+TORTURE_SHUTDOWN_GRACE="$TORTURE_SHUTDOWN_GRACE"; export TORTURE_SHUTDOWN_GRACE
 TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE
 if ! test -e $resdir
 then
@@ -307,10 +314,10 @@ awk < $T/cfgcpu.pack \
 }
 
 # Dump out the scripting required to run one test batch.
-function dump(first, pastlast)
+function dump(first, pastlast, batchnum)
 {
-	print "echo ----Start batch: `date`";
-	print "echo ----Start batch: `date` >> " rd "/log";
+	print "echo ----Start batch " batchnum ": `date`";
+	print "echo ----Start batch " batchnum ": `date` >> " rd "/log";
 	jn=1
 	for (j = first; j < pastlast; j++) {
 		builddir=KVM "/b" jn
@@ -371,25 +378,28 @@ END {
 	njobs = i;
 	nc = ncpus;
 	first = 0;
+	batchnum = 1;
 
 	# Each pass through the following loop considers one test.
 	for (i = 0; i < njobs; i++) {
 		if (ncpus == 0) {
 			# Sequential test specified, each test its own batch.
-			dump(i, i + 1);
+			dump(i, i + 1, batchnum);
 			first = i;
+			batchnum++;
 		} else if (nc < cpus[i] && i != 0) {
 			# Out of CPUs, dump out a batch.
-			dump(first, i);
+			dump(first, i, batchnum);
 			first = i;
 			nc = ncpus;
+			batchnum++;
 		}
 		# Account for the CPUs needed by the current test.
 		nc -= cpus[i];
 	}
 	# Dump the last batch.
 	if (ncpus != 0)
-		dump(first, i);
+		dump(first, i, batchnum);
 }' >> $T/script
 
 cat << ___EOF___ >> $T/script
@@ -24,9 +24,6 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=/tmp/abat-chk-badness.sh.$$
-trap 'rm -f $T' 0
-
 file="$1"
 title="$2"
 
@@ -36,9 +33,41 @@ if grep -Pq '\x00' < $file
 then
 	print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
-if test -s $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
+if test -s $1.diags
 then
 	print_warning Assertion failure in $file $title
-	cat $T
+	# cat $1.diags
+	summary=""
+	n_badness=`grep -c Badness $1`
+	if test "$n_badness" -ne 0
+	then
+		summary="$summary  Badness: $n_badness"
+	fi
+	n_warn=`grep -v 'Warning: unable to open an initial console' $1 | egrep -c 'WARNING:|Warn'`
+	if test "$n_warn" -ne 0
+	then
+		summary="$summary  Warnings: $n_warn"
+	fi
+	n_bugs=`egrep -c 'BUG|Oops:' $1`
+	if test "$n_bugs" -ne 0
+	then
+		summary="$summary  Bugs: $n_bugs"
+	fi
+	n_calltrace=`grep -c 'Call Trace:' $1`
+	if test "$n_calltrace" -ne 0
+	then
+		summary="$summary  Call Traces: $n_calltrace"
+	fi
+	n_lockdep=`grep -c =========== $1`
+	if test "$n_badness" -ne 0
+	then
+		summary="$summary  lockdep: $n_badness"
+	fi
+	n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|Stall ended before state dump start' $1`
+	if test "$n_stalls" -ne 0
+	then
+		summary="$summary  Stalls: $n_stalls"
+	fi
+	print_warning Summary: $summary
 fi
@@ -20,7 +20,6 @@ CONFIG_PROVE_RCU
 
 CONFIG_NO_HZ_FULL_SYSIDLE
 CONFIG_RCU_NOCB_CPU
-CONFIG_RCU_USER_QS
 
 	Meaningless for TINY_RCU.
 
@@ -72,10 +72,6 @@ CONFIG_RCU_TORTURE_TEST_RUNNABLE
 
 	Always used in KVM testing.
 
-CONFIG_RCU_USER_QS
-
-	Redundant with CONFIG_NO_HZ_FULL.
-
 CONFIG_PREEMPT_RCU
 CONFIG_TREE_RCU