
Merge branches 'doc.2023.12.13a', 'torture.2023.11.23a', 'fixes.2023.12.13a', 'rcu-tasks.2023.12.12b' and 'srcu.2023.12.13a' into rcu-merge.2023.12.13a

Neeraj Upadhyay (AMD) 2023-12-14 01:21:31 +05:30
15 changed files with 103 additions and 42 deletions

View File

@@ -5302,6 +5302,12 @@
 			Dump ftrace buffer after reporting RCU CPU
 			stall warning.
 
+	rcupdate.rcu_cpu_stall_notifiers= [KNL]
+			Provide RCU CPU stall notifiers, but see the
+			warnings in the RCU_CPU_STALL_NOTIFIER Kconfig
+			option's help text.  TL;DR:  You almost certainly
+			do not want rcupdate.rcu_cpu_stall_notifiers.
+
 	rcupdate.rcu_cpu_stall_suppress= [KNL]
 			Suppress RCU CPU stall warning messages.
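
As a usage sketch (an illustration, not part of this hunk): even with CONFIG_RCU_CPU_STALL_NOTIFIER=y, the notifiers must still be requested explicitly on the kernel command line at boot:

	rcupdate.rcu_cpu_stall_notifiers=1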

View File

@@ -13,7 +13,7 @@
 #define RCU_STALL_NOTIFY_NORM	1
 #define RCU_STALL_NOTIFY_EXP	2
 
-#ifdef CONFIG_RCU_STALL_COMMON
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 #include <linux/notifier.h>
 #include <linux/types.h>
@@ -21,12 +21,12 @@
 int rcu_stall_chain_notifier_register(struct notifier_block *n);
 int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
 
-#else // #ifdef CONFIG_RCU_STALL_COMMON
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 // No RCU CPU stall warnings in Tiny RCU.
 static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
 static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
 
-#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 #endif /* __LINUX_RCU_NOTIFIER_H */
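
A minimal sketch of a client of this API (hypothetical names; not code from this commit). Registration is expected to fail with -EEXIST unless stall notifiers were enabled at boot, per the kernel/rcu changes later in this merge:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rcu_notifier.h>

/* Keep the callback lockless, as the Kconfig help text below insists. */
static int example_stall_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	pr_info("RCU stall notifier: %s stall\n",
		val == RCU_STALL_NOTIFY_EXP ? "expedited" : "normal");
	return NOTIFY_OK;
}

static struct notifier_block example_stall_nb = {
	.notifier_call = example_stall_cb,
};

static int __init example_stall_init(void)
{
	/* Returns -EEXIST unless rcupdate.rcu_cpu_stall_notifiers=1 at boot. */
	return rcu_stall_chain_notifier_register(&example_stall_nb);
}

static void __exit example_stall_exit(void)
{
	rcu_stall_chain_notifier_unregister(&example_stall_nb);
}

module_init(example_stall_init);
module_exit(example_stall_exit);
MODULE_LICENSE("GPL");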

View File

@@ -34,9 +34,6 @@
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
 #define ulong2long(a)		(*(long *)(&(a)))
-#define USHORT_CMP_GE(a, b)	(USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
-#define USHORT_CMP_LT(a, b)	(USHRT_MAX / 2 < (unsigned short)((a) - (b)))
 
 /* Exported common interfaces */
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
@@ -301,6 +298,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
 	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
 }
 
+static inline void rcu_try_lock_acquire(struct lockdep_map *map)
+{
+	lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
+}
+
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
 	lock_release(map, _THIS_IP_);
@@ -315,6 +317,7 @@ int rcu_read_lock_any_held(void);
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 # define rcu_lock_acquire(a)		do { } while (0)
+# define rcu_try_lock_acquire(a)	do { } while (0)
 # define rcu_lock_release(a)		do { } while (0)
 
 static inline int rcu_read_lock_held(void)

View File

@@ -229,7 +229,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
 	int retval;
 
 	srcu_check_nmi_safety(ssp, true);
 	retval = __srcu_read_lock_nmisafe(ssp);
-	rcu_lock_acquire(&ssp->dep_map);
+	rcu_try_lock_acquire(&ssp->dep_map);
 	return retval;
 }
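
The trylock-style annotation matters to lockdep, which treats a try-acquisition as one that cannot block. A reference sketch of the arguments (following lock_acquire()'s signature in include/linux/lockdep.h; the deadlock-report reading is an inference from this change, not stated in the diff):

/*
 * lock_acquire(map, subclass, trylock, read, check, nest_lock, ip)
 *
 * rcu_lock_acquire():     lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
 * rcu_try_lock_acquire(): lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
 *
 * With trylock=1, lockdep does not consider this recursive-read
 * acquisition capable of blocking, so an NMI-safe SRCU reader that
 * fires while the same srcu_struct is already read-held is not
 * flagged as a potential deadlock.
 */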

View File

@@ -124,7 +124,7 @@ struct call_rcu_chain {
 	struct rcu_head crc_rh;
 	bool crc_stop;
 };
-struct call_rcu_chain *call_rcu_chain;
+struct call_rcu_chain *call_rcu_chain_list;
 
 /* Forward reference. */
 static void lock_torture_cleanup(void);
@@ -1074,12 +1074,12 @@ static int call_rcu_chain_init(void)
 	if (call_rcu_chains <= 0)
 		return 0;
-	call_rcu_chain = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain), GFP_KERNEL);
-	if (!call_rcu_chain)
+	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
+	if (!call_rcu_chain_list)
 		return -ENOMEM;
 	for (i = 0; i < call_rcu_chains; i++) {
-		call_rcu_chain[i].crc_stop = false;
-		call_rcu(&call_rcu_chain[i].crc_rh, call_rcu_chain_cb);
+		call_rcu_chain_list[i].crc_stop = false;
+		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
 	}
 	return 0;
 }
@@ -1089,13 +1089,13 @@ static void call_rcu_chain_cleanup(void)
 {
 	int i;
 
-	if (!call_rcu_chain)
+	if (!call_rcu_chain_list)
 		return;
 	for (i = 0; i < call_rcu_chains; i++)
-		smp_store_release(&call_rcu_chain[i].crc_stop, true);
+		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
 	rcu_barrier();
-	kfree(call_rcu_chain);
-	call_rcu_chain = NULL;
+	kfree(call_rcu_chain_list);
+	call_rcu_chain_list = NULL;
 }
 
 static void lock_torture_cleanup(void)
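
For context, a hedged reconstruction of the callback these hunks pair with (call_rcu_chain_cb() is not shown in this diff; the body below is inferred from the crc_stop store-release handshake above):

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	/* Pairs with smp_store_release() in call_rcu_chain_cleanup(). */
	if (!smp_load_acquire(&crcp->crc_stop))
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); /* Repost: keep chaining. */
}

The rcu_barrier() in call_rcu_chain_cleanup() then waits for the final reposted callback of each chain to finish before the kfree().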

View File

@@ -105,6 +105,31 @@ config RCU_CPU_STALL_CPUTIME
 	  The boot option rcupdate.rcu_cpu_stall_cputime has the same function
 	  as this one, but will override this if it exists.
 
+config RCU_CPU_STALL_NOTIFIER
+	bool "Provide RCU CPU-stall notifiers"
+	depends on RCU_STALL_COMMON
+	depends on DEBUG_KERNEL
+	depends on RCU_EXPERT
+	default n
+	help
+	  WARNING: You almost certainly do not want this!!!
+
+	  Enable RCU CPU-stall notifiers, which are invoked just before
+	  printing the RCU CPU stall warning.  As such, bugs in notifier
+	  callbacks can prevent stall warnings from being printed.
+	  And the whole reason that a stall warning is being printed is
+	  that something is hung up somewhere.  Therefore, the notifier
+	  callbacks must be written extremely carefully, preferably
+	  containing only lockless code.  After all, it is quite possible
+	  that the whole reason that the RCU CPU stall is happening in
+	  the first place is that someone forgot to release whatever lock
+	  that you are thinking of acquiring.  In which case, having your
+	  notifier callback acquire that lock will hang, preventing the
+	  RCU CPU stall warning from appearing.
+
+	  Say Y here if you want RCU CPU stall notifiers (you don't want them)
+
+	  Say N if you are unsure.
+
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL

View File

@@ -262,6 +262,8 @@ static inline bool rcu_stall_is_suppressed_at_boot(void)
 	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
 }
 
+extern int rcu_cpu_stall_notifiers;
+
 #ifdef CONFIG_RCU_STALL_COMMON
 
 extern int rcu_cpu_stall_ftrace_dump;
@@ -659,10 +661,10 @@ static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
 bool rcu_cpu_beenfullyonline(int cpu);
 #endif
 
-#ifdef CONFIG_RCU_STALL_COMMON
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 int rcu_stall_notifier_call_chain(unsigned long val, void *v);
-#else // #ifdef CONFIG_RCU_STALL_COMMON
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
-#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 #endif /* __LINUX_RCU_H */

View File

@@ -2450,10 +2450,12 @@ static int rcu_torture_stall(void *args)
 	unsigned long stop_at;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
-	ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
-	if (ret)
-		pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
-			__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
+	if (rcu_cpu_stall_notifiers) {
+		ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
+		if (ret)
+			pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
+				__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
+	}
 	if (stall_cpu_holdoff > 0) {
 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
@@ -2497,7 +2499,7 @@ static int rcu_torture_stall(void *args)
 		cur_ops->readunlock(idx);
 	}
 	pr_alert("%s end.\n", __func__);
-	if (!ret) {
+	if (rcu_cpu_stall_notifiers && !ret) {
 		ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
 		if (ret)
 			pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
@@ -3872,7 +3874,9 @@
 	}
 	if (fqs_duration < 0)
 		fqs_duration = 0;
-	if (fqs_duration) {
+	if (fqs_holdoff < 0)
+		fqs_holdoff = 0;
+	if (fqs_duration && fqs_holdoff) {
 		/* Create the fqs thread */
 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
 						  fqs_task);

View File

@@ -772,20 +772,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
  */
 static void srcu_gp_start(struct srcu_struct *ssp)
 {
-	struct srcu_data *sdp;
 	int state;
 
-	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
-		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
-	else
-		sdp = this_cpu_ptr(ssp->sda);
 	lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
 	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
-	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
-	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
-	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
 	WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
 	WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
@@ -1271,9 +1261,11 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	 * period (gp_num = X + 8). So acceleration fails.
 	 */
 	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
-	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-	WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
+	if (rhp) {
+		rcu_segcblist_advance(&sdp->srcu_cblist,
+				      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+		WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s));
+	}
 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
 		sdp->srcu_gp_seq_needed = s;
 		needgp = true;
@@ -1723,6 +1715,11 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+	/*
+	 * Although this function is theoretically re-entrant, concurrent
+	 * callbacks invocation is disallowed to avoid executing an SRCU barrier
+	 * too early.
+	 */
 	if (sdp->srcu_cblist_invoking ||
 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
 		spin_unlock_irq_rcu_node(sdp);
@@ -1753,6 +1750,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	sdp->srcu_cblist_invoking = false;
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
 	spin_unlock_irq_rcu_node(sdp);
+	/* An SRCU barrier or callbacks from previous nesting work pending */
 	if (more)
 		srcu_schedule_cbs_sdp(sdp, 0);
 }

View File

@@ -975,7 +975,7 @@ static void check_holdout_task(struct task_struct *t,
 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
 	    !rcu_tasks_is_holdout(t) ||
 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
-	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+	     !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
 		WRITE_ONCE(t->rcu_tasks_holdout, false);
 		list_del_init(&t->rcu_tasks_holdout_list);
 		put_task_struct(t);
@@ -993,7 +993,7 @@ static void check_holdout_task(struct task_struct *t,
 		 t, ".I"[is_idle_task(t)],
 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
-		 t->rcu_tasks_idle_cpu, cpu);
+		 data_race(t->rcu_tasks_idle_cpu), cpu);
 	sched_show_task(t);
 }

View File

@@ -2338,6 +2338,8 @@ void rcu_force_quiescent_state(void)
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_old = NULL;
 
+	if (!rcu_gp_in_progress())
+		return;
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = raw_cpu_read(rcu_data.mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {

View File

@@ -1061,6 +1061,7 @@ static int __init rcu_sysrq_init(void)
 }
 early_initcall(rcu_sysrq_init);
 
+#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
 
 //////////////////////////////////////////////////////////////////////////////
 //
@@ -1081,7 +1082,13 @@ static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);
  */
 int rcu_stall_chain_notifier_register(struct notifier_block *n)
 {
-	return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
+	int rcsn = rcu_cpu_stall_notifiers;
+
+	WARN(1, "Adding %pS() to RCU stall notifier list (%s).\n", n->notifier_call,
+	     rcsn ? "possibly suppressing RCU CPU stall warnings" : "failed, so all is well");
+	if (rcsn)
+		return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
+	return -EEXIST;
 }
 EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_register);
@@ -1115,3 +1122,5 @@ int rcu_stall_notifier_call_chain(unsigned long val, void *v)
 {
 	return atomic_notifier_call_chain(&rcu_cpu_stall_notifier_list, val, v);
 }
+
+#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER

View File

@@ -538,9 +538,15 @@ long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);
 #endif
 
+int rcu_cpu_stall_notifiers __read_mostly; // !0 = provide stall notifiers (rarely useful)
+EXPORT_SYMBOL_GPL(rcu_cpu_stall_notifiers);
+
 #ifdef CONFIG_RCU_STALL_COMMON
 int rcu_cpu_stall_ftrace_dump __read_mostly;
 module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
+#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
+module_param(rcu_cpu_stall_notifiers, int, 0444);
+#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
 int rcu_cpu_stall_suppress __read_mostly;	// !0 = suppress stall warnings.
 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
 module_param(rcu_cpu_stall_suppress, int, 0644);
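
Because the new parameter is registered with mode 0444, it can be set only at boot and is read-only thereafter; matching the rcupdate.rcu_cpu_stall_notifiers= name documented above, it should then be inspectable (an inference, not shown in this diff) via:

	/sys/module/rcupdate/parameters/rcu_cpu_stall_notifiers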

View File

@@ -67,7 +67,10 @@ ___EOF___
 # build using nolibc on supported archs (smaller executable) and fall
 # back to regular glibc on other ones.
 if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
-   "||__ARM_EABI__||__aarch64__||__s390x__||__loongarch__\nyes\n#endif" \
+   "||__ARM_EABI__||__aarch64__||(__mips__ && _ABIO32)" \
+   "||__powerpc__||(__riscv && __riscv_xlen == 64)" \
+   "||__s390x__||__loongarch__" \
+   "\nyes\n#endif" \
   | ${CROSS_COMPILE}gcc -E -nostdlib -xc - \
   | grep -q '^yes'; then
 	# architecture supported by nolibc

View File

@@ -1 +1,4 @@
 nohz_full=2-9
+rcutorture.stall_cpu=14
+rcutorture.stall_cpu_holdoff=90
+rcutorture.fwd_progress=0