mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-23 12:43:55 +08:00
b049340613
If no CPU is in the full dynticks range, we can avoid the full dynticks cputime accounting through generic vtime along with its overhead and use the traditional tick based accounting instead. Let's do this and nope the off case with static keys. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Kevin Hilman <khilman@linaro.org>
126 lines
3.7 KiB
C
126 lines
3.7 KiB
C
#ifndef _LINUX_KERNEL_VTIME_H
|
|
#define _LINUX_KERNEL_VTIME_H
|
|
|
|
#include <linux/context_tracking_state.h>
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
|
#include <asm/vtime.h>
|
|
#endif
|
|
|
|
|
|
struct task_struct;
|
|
|
|
/*
|
|
* vtime_accounting_enabled() definitions/declarations
|
|
*/
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
|
/*
 * With native vtime accounting (CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) built
 * in, accounting is unconditionally active: there is no runtime switch.
 */
static inline bool vtime_accounting_enabled(void)
{
	return true;
}
|
|
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
|
|
static inline bool vtime_accounting_enabled(void)
|
|
{
|
|
if (static_key_false(&context_tracking_enabled)) {
|
|
if (context_tracking_active())
|
|
return true;
|
|
}
|
|
|
|
return false;
|
|
}
|
|
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
|
|
|
|
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
|
|
/*
 * Without CONFIG_VIRT_CPU_ACCOUNTING there is no vtime accounting at
 * all, so report it permanently disabled.
 */
static inline bool vtime_accounting_enabled(void)
{
	return false;
}
|
|
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
|
|
|
/*
|
|
* Common vtime APIs
|
|
*/
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
|
|
#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
/* The architecture supplies its own context-switch vtime hook. */
extern void vtime_task_switch(struct task_struct *prev);
#else
extern void vtime_common_task_switch(struct task_struct *prev);
/*
 * Generic context-switch hook: flush the outgoing task's vtime, but
 * skip the call entirely while vtime accounting is inactive (the
 * vtime_accounting_enabled() check is a cheap static-key test).
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled())
		return;

	vtime_common_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
|
|
|
|
extern void vtime_account_system(struct task_struct *tsk);
|
|
extern void vtime_account_idle(struct task_struct *tsk);
|
|
extern void vtime_account_user(struct task_struct *tsk);
|
|
|
|
#ifdef __ARCH_HAS_VTIME_ACCOUNT
/* The architecture supplies its own IRQ-entry vtime hook. */
extern void vtime_account_irq_enter(struct task_struct *tsk);
#else
extern void vtime_common_account_irq_enter(struct task_struct *tsk);
/*
 * Generic IRQ-entry hook: account the interrupted context's cputime,
 * bypassing the common path while vtime accounting is inactive.
 */
static inline void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	vtime_common_account_irq_enter(tsk);
}
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
|
|
|
|
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
|
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING: every vtime hook compiles away to an
 * empty inline, so callers need no #ifdefs of their own.
 */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
|
|
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
|
|
extern void arch_vtime_task_switch(struct task_struct *tsk);
|
|
extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
|
|
|
|
/*
 * IRQ-exit hook for generic vtime: charge the time spent in the
 * interrupt, but only while vtime accounting is active.
 */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	vtime_gen_account_irq_exit(tsk);
}
|
|
|
|
extern void vtime_user_enter(struct task_struct *tsk);
|
|
|
|
/*
 * Leaving userspace: flush the pending user cputime to the task before
 * kernel time starts accumulating.
 */
static inline void vtime_user_exit(struct task_struct *tsk)
{
	vtime_account_user(tsk);
}
|
|
extern void vtime_guest_enter(struct task_struct *tsk);
|
|
extern void vtime_guest_exit(struct task_struct *tsk);
|
|
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
|
|
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
|
|
/*
 * Without CONFIG_VIRT_CPU_ACCOUNTING_GEN, on hard|softirq exit we
 * always account the elapsed time to hard|softirq (system) cputime.
 */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	vtime_account_system(tsk);
}
|
|
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING_GEN: the user/guest transition hooks and
 * idle initialization compile away to empty inlines.
 */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
|
|
#endif
|
|
|
|
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/* Fine-grained IRQ time accounting is compiled in. */
extern void irqtime_account_irq(struct task_struct *tsk);
#else
/* IRQ time accounting disabled: the hook compiles away. */
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif
|
|
|
|
/*
 * Called on hard/softirq entry: first let vtime charge the interrupted
 * context, then feed the fine-grained irqtime accounting (each hook is
 * a no-op when its CONFIG option is off).
 */
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}
|
|
|
|
/*
 * Called on hard/softirq exit: mirror of account_irq_enter_time() —
 * charge the interrupt's cputime via vtime, then update irqtime.
 */
static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}
|
|
|
|
#endif /* _LINUX_KERNEL_VTIME_H */
|