Merge tag 'sched-urgent-2021-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - Fix performance regression caused by lack of intended batching of
     RCU callbacks by over-eager NOHZ-full code.

   - Fix cgroups-related corruption of the load_avg and load_sum
     metrics.

   - Three fixes for blocked-load, util_sum/runnable_sum and util_est
     tracking bugs"

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'sched-urgent-2021-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix util_est UTIL_AVG_UNCHANGED handling
  sched/pelt: Ensure that *_sum is always synced with *_avg
  tick/nohz: Only check for RCU deferred wakeup on user/guest entry when needed
  sched/fair: Make sure to update tg contrib for blocked load
  sched/fair: Keep load_avg and load_sum synced
commit 99f925947a
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
         lockdep_assert_irqs_disabled();
-        rcu_nocb_flush_deferred_wakeup();
+        tick_nohz_user_enter_prepare();
 }
 
 /**
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -350,11 +350,19 @@ struct load_weight {
  * Only for tasks we track a moving average of the past instantaneous
  * estimated utilization. This allows to absorb sporadic drops in utilization
  * of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est.enqueued at dequeue
+ * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
+ * for a task) it is safe to use MSB.
  */
 struct util_est {
         unsigned int enqueued;
         unsigned int ewma;
 #define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
 } __attribute__((__aligned__(sizeof(u64))));
 
 /*
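
The comment above pins down why the MSB is safe: a task's util_est.enqueued never exceeds 1024 (the PELT util_avg ceiling for a task), so bit 31 is always free to carry the "unchanged since dequeue" marker. A minimal user-space sketch of that flag scheme (illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define UTIL_AVG_UNCHANGED 0x80000000u

int main(void)
{
        unsigned int enqueued = 1024;   /* maximum task utilization */

        /* Mark at dequeue time: util_avg unchanged since then. */
        enqueued |= UTIL_AVG_UNCHANGED;

        /* Flag and value coexist without interfering. */
        assert(enqueued & UTIL_AVG_UNCHANGED);
        assert((enqueued & ~UTIL_AVG_UNCHANGED) == 1024);

        /* Readers strip the flag, as _task_util_est() does below. */
        printf("value = %u\n", enqueued & ~UTIL_AVG_UNCHANGED);
        return 0;
}
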
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
         __tick_nohz_task_switch();
 }
 
+static inline void tick_nohz_user_enter_prepare(void)
+{
+        if (tick_nohz_full_cpu(smp_processor_id()))
+                rcu_nocb_flush_deferred_wakeup();
+}
+
 #endif
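
tick_nohz_user_enter_prepare() above performs the rcuog flush only when the current CPU runs in nohz_full mode, which is what restores the intended batching of RCU callbacks on ordinary housekeeping CPUs. A user-space sketch of the same gating pattern (all names here are hypothetical stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static bool cpu_is_nohz_full(int cpu)       /* stand-in for tick_nohz_full_cpu() */
{
        return cpu == 3;                    /* pretend CPU 3 is isolated */
}

static void flush_deferred_wakeups(void)    /* stand-in for rcu_nocb_flush_deferred_wakeup() */
{
        puts("flushing deferred rcuog wakeups");
}

static void user_enter_prepare(int cpu)
{
        if (cpu_is_nohz_full(cpu))          /* skip the flush on housekeeping CPUs */
                flush_deferred_wakeups();
}

int main(void)
{
        user_enter_prepare(0);              /* no flush: callbacks stay batched */
        user_enter_prepare(3);              /* flush before returning to user space */
        return 0;
}
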
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 
 #include "common.h"
 
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                 local_irq_disable_exit_to_user();
 
                 /* Check if any of the above work has queued a deferred wakeup */
-                rcu_nocb_flush_deferred_wakeup();
+                tick_nohz_user_enter_prepare();
 
                 ti_work = READ_ONCE(current_thread_info()->flags);
         }
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
         lockdep_assert_irqs_disabled();
 
         /* Flush pending rcuog wakeup before the last need_resched() check */
-        rcu_nocb_flush_deferred_wakeup();
+        tick_nohz_user_enter_prepare();
 
         if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                 ti_work = exit_to_user_mode_loop(regs, ti_work);
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
 #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 #define __P(F) __PS(#F, F)
 #define P(F) __PS(#F, p->F)
+#define PM(F, M) __PS(#F, p->F & (M))
 #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 #define __PN(F) __PSN(#F, F)
 #define PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
         P(se.avg.util_avg);
         P(se.avg.last_update_time);
         P(se.avg.util_est.ewma);
-        P(se.avg.util_est.enqueued);
+        PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
         __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
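
The new PM() macro prints a struct field with a mask applied, reusing # stringization for the label; proc_sched_show_task() uses it so the debug output shows util_est.enqueued with the flag bit stripped. A tiny user-space sketch of the same macro pattern (the struct and names are hypothetical):

#include <stdio.h>

struct avg { unsigned int enqueued; };
static struct avg a = { .enqueued = 1024u | 0x80000000u };

/* Print field F of 'a' with mask M applied; #F becomes the label. */
#define PM(F, M) printf("%-30s: %u\n", #F, a.F & (M))

int main(void)
{
        PM(enqueued, ~0x80000000u);   /* prints the value with the flag stripped */
        return 0;
}
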
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3499,10 +3499,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-        long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+        long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
         unsigned long load_avg;
         u64 load_sum = 0;
-        s64 delta_sum;
         u32 divider;
 
         if (!runnable_sum)
@@ -3549,13 +3548,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
         load_sum = (s64)se_weight(se) * runnable_sum;
         load_avg = div_s64(load_sum, divider);
 
-        delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-        delta_avg = load_avg - se->avg.load_avg;
+        delta = load_avg - se->avg.load_avg;
 
         se->avg.load_sum = runnable_sum;
         se->avg.load_avg = load_avg;
-        add_positive(&cfs_rq->avg.load_avg, delta_avg);
-        add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+        add_positive(&cfs_rq->avg.load_avg, delta);
+        cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3765,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+        /*
+         * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+         * See ___update_load_avg() for details.
+         */
+        u32 divider = get_pelt_divider(&cfs_rq->avg);
+
         dequeue_load_avg(cfs_rq, se);
         sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-        sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+        cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
         sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-        sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+        cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 
         add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
@@ -3902,7 +3907,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 {
         struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-        return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+        return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
 }
 
 static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4007,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
          * Reset EWMA on utilization increases, the moving average is used only
          * to smooth utilization decreases.
          */
-        ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+        ue.enqueued = task_util(p);
         if (sched_feat(UTIL_EST_FASTUP)) {
                 if (ue.ewma < ue.enqueued) {
                         ue.ewma = ue.enqueued;
@@ -4051,6 +4056,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
         ue.ewma += last_ewma_diff;
         ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
+        ue.enqueued |= UTIL_AVG_UNCHANGED;
         WRITE_ONCE(p->se.avg.util_est, ue);
 
         trace_sched_util_est_se_tp(&p->se);
@@ -8030,7 +8036,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
                 /* Propagate pending load changes to the parent, if any: */
                 se = cfs_rq->tg->se[cpu];
                 if (se && !skip_blocked_update(se))
-                        update_load_avg(cfs_rq_of(se), se, 0);
+                        update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 
                 /*
                  * There can be a lot of idle CPU cgroups. Don't let fully
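
The fair.c hunks above all enforce one invariant: a PELT *_sum is re-derived from its *_avg and the current divider rather than adjusted independently, so the two can never drift apart (that drift is what corrupted load_avg/load_sum). A minimal arithmetic sketch of the invariant (plain C; pelt_divider() mirrors get_pelt_divider() from kernel/sched/pelt.h, values illustrative):

#include <stdio.h>

#define LOAD_AVG_MAX 47742  /* maximum sum of the PELT geometric series */

static unsigned int pelt_divider(unsigned int period_contrib)
{
        return LOAD_AVG_MAX - 1024 + period_contrib;
}

int main(void)
{
        unsigned long load_avg = 512;
        unsigned int divider = pelt_divider(768);

        /* Keep load_sum synced with load_avg instead of patching it up: */
        unsigned long long load_sum = (unsigned long long)load_avg * divider;

        printf("divider=%u load_sum=%llu\n", divider, load_sum);
        return 0;
}
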
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
         return LOAD_AVG_MAX - 1024 + avg->period_contrib;
 }
 
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
         unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
         if (!sched_feat(UTIL_EST))
                 return;
 
-        /* Avoid store if the flag has been already set */
+        /* Avoid store if the flag has been already reset */
         enqueued = avg->util_est.enqueued;
         if (!(enqueued & UTIL_AVG_UNCHANGED))
                 return;
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
 EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
+EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;