vtime: Optimize full dynticks accounting off case with static keys
If no CPU is in the full dynticks range, we can avoid the full dynticks cputime accounting through generic vtime, along with its overhead, and use the traditional tick-based accounting instead. Let's do this and no-op the off case with static keys.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kevin Hilman <khilman@linaro.org>
This commit is contained in:
parent a5725ac23b
commit b049340613
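The change hinges on one pattern: the generic vtime entry points become cheap inline wrappers that test vtime_accounting_enabled() at the call site, and that test is itself gated by the context_tracking_enabled static key, so when no CPU is in the full dynticks range the whole path reduces to a patched-out branch. Below is a minimal userspace sketch of that pattern, not the kernel code itself: plain booleans stand in for the static key (jump label) and the per-CPU context-tracking state, and the main()/printf harness is purely illustrative.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stands in for the context_tracking_enabled static key (a jump label
	 * in the kernel, where the disabled case is a patched-out no-op). */
	static bool context_tracking_enabled;

	/* Stands in for context_tracking_active(): is this CPU in the
	 * full dynticks (context tracking) range? */
	static bool context_tracking_active(void)
	{
		return context_tracking_enabled;
	}

	/* Mirrors the new vtime_accounting_enabled(): check the global key
	 * first, then the per-CPU state. */
	static inline bool vtime_accounting_enabled(void)
	{
		if (context_tracking_enabled) {	/* static_key_false() in the kernel */
			if (context_tracking_active())
				return true;
		}
		return false;
	}

	/* Out-of-line slow path, only reached when generic vtime
	 * accounting is actually in use. */
	static void vtime_common_account_irq_enter(void)
	{
		printf("full dynticks cputime accounting\n");
	}

	/* Inline wrapper as introduced by the patch: callers pay only for
	 * the enabled check; the off case does no accounting work at all. */
	static inline void vtime_account_irq_enter(void)
	{
		if (vtime_accounting_enabled())
			vtime_common_account_irq_enter();
	}

	int main(void)
	{
		vtime_account_irq_enter();	/* off case: nothing happens */
		context_tracking_enabled = true;
		vtime_account_irq_enter();	/* on case: slow path runs */
		return 0;
	}

With real jump labels the disabled branch costs essentially nothing in the instruction stream, so the tick-based accounting path no longer pays for the full dynticks infrastructure.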
include/linux/context_tracking.h

@@ -74,8 +74,7 @@ static inline void context_tracking_init(void) { }
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static inline void guest_enter(void)
 {
-	if (static_key_false(&context_tracking_enabled) &&
-	    vtime_accounting_enabled())
+	if (vtime_accounting_enabled())
 		vtime_guest_enter(current);
 	else
 		current->flags |= PF_VCPU;
@@ -83,8 +82,7 @@ static inline void guest_enter(void)
 
 static inline void guest_exit(void)
 {
-	if (static_key_false(&context_tracking_enabled) &&
-	    vtime_accounting_enabled())
+	if (vtime_accounting_enabled())
 		vtime_guest_exit(current);
 	else
 		current->flags &= ~PF_VCPU;
include/linux/vtime.h

@@ -1,22 +1,68 @@
 #ifndef _LINUX_KERNEL_VTIME_H
 #define _LINUX_KERNEL_VTIME_H
 
+#include <linux/context_tracking_state.h>
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/vtime.h>
 #endif
 
+
 struct task_struct;
 
+/*
+ * vtime_accounting_enabled() definitions/declarations
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static inline bool vtime_accounting_enabled(void) { return true; }
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static inline bool vtime_accounting_enabled(void)
+{
+	if (static_key_false(&context_tracking_enabled)) {
+		if (context_tracking_active())
+			return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+static inline bool vtime_accounting_enabled(void) { return false; }
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+
+/*
+ * Common vtime APIs
+ */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
 extern void vtime_task_switch(struct task_struct *prev);
+#else
+extern void vtime_common_task_switch(struct task_struct *prev);
+static inline void vtime_task_switch(struct task_struct *prev)
+{
+	if (vtime_accounting_enabled())
+		vtime_common_task_switch(prev);
+}
+#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
+
 extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
-extern void vtime_account_irq_enter(struct task_struct *tsk);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-static inline bool vtime_accounting_enabled(void) { return true; }
-#endif
+#ifdef __ARCH_HAS_VTIME_ACCOUNT
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+#else
+extern void vtime_common_account_irq_enter(struct task_struct *tsk);
+static inline void vtime_account_irq_enter(struct task_struct *tsk)
+{
+	if (vtime_accounting_enabled())
+		vtime_common_account_irq_enter(tsk);
+}
+#endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
@@ -24,14 +70,20 @@ static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
 static inline void vtime_account_user(struct task_struct *tsk) { }
 static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
-static inline bool vtime_accounting_enabled(void) { return false; }
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_account_irq_exit(struct task_struct *tsk);
-extern bool vtime_accounting_enabled(void);
+extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
+
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+	if (vtime_accounting_enabled())
+		vtime_gen_account_irq_exit(tsk);
+}
+
 extern void vtime_user_enter(struct task_struct *tsk);
+
 static inline void vtime_user_exit(struct task_struct *tsk)
 {
 	vtime_account_user(tsk);
@@ -39,7 +91,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
-#else
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
 static inline void vtime_account_irq_exit(struct task_struct *tsk)
 {
 	/* On hard|softirq exit we always account to hard|softirq cputime */
kernel/sched/cputime.c

@@ -378,11 +378,8 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_task_switch(struct task_struct *prev)
+void vtime_common_task_switch(struct task_struct *prev)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	if (is_idle_task(prev))
 		vtime_account_idle(prev);
 	else
@@ -404,11 +401,8 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_common_account_irq_enter(struct task_struct *tsk)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	if (!in_interrupt()) {
 		/*
 		 * If we interrupted user, context_tracking_in_user()
@@ -428,7 +422,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 	}
 	vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
@@ -669,11 +663,8 @@ void vtime_account_system(struct task_struct *tsk)
 	write_sequnlock(&tsk->vtime_seqlock);
 }
 
-void vtime_account_irq_exit(struct task_struct *tsk)
+void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	write_seqlock(&tsk->vtime_seqlock);
 	if (context_tracking_in_user())
 		tsk->vtime_snap_whence = VTIME_USER;
@@ -732,11 +723,6 @@ void vtime_account_idle(struct task_struct *tsk)
 	account_idle_time(delta_cpu);
 }
 
-bool vtime_accounting_enabled(void)
-{
-	return context_tracking_active();
-}
-
 void arch_vtime_task_switch(struct task_struct *prev)
 {
 	write_seqlock(&prev->vtime_seqlock);