irqtime: Move irqtime entry accounting after irq offset incrementation

IRQ time entry is currently accounted before HARDIRQ_OFFSET or
SOFTIRQ_OFFSET is incremented. This makes it convenient to decide to
which index the cputime to be accounted should be dispatched.

Unfortunately this prevents tick_irq_enter() from being called under
HARDIRQ_OFFSET, because tick_irq_enter() has to run before the IRQ entry
accounting due to the necessary clock catch-up. As a result we don't
benefit from appropriate lockdep coverage on tick_irq_enter().

To prepare for fixing this, move the IRQ entry cputime accounting after
the preempt offset is incremented. This requires the cputime dispatch
code to handle the extra offset.
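
As a rough illustration of what "handling the extra offset" amounts to,
here is a minimal standalone sketch (not kernel code) of the dispatch
decision, assuming the usual preempt count layout (SOFTIRQ_OFFSET =
0x100, HARDIRQ_OFFSET = 0x10000, HARDIRQ_MASK = 0xf0000). The charge()
helper and its strings are purely illustrative:

#include <stdio.h>

#define SOFTIRQ_OFFSET 0x00000100u
#define HARDIRQ_OFFSET 0x00010000u
#define HARDIRQ_MASK   0x000f0000u

/*
 * Which bucket does the elapsed delta belong to? Subtracting the offset
 * that the caller has just added to preempt_count recovers the context
 * that was running before the current {soft,hard}irq section began.
 * The real code additionally skips softirq accounting for ksoftirqd.
 */
static const char *charge(unsigned int preempt_count, unsigned int offset)
{
        unsigned int pc = preempt_count - offset;

        if (pc & HARDIRQ_MASK)
                return "CPUTIME_IRQ";
        if (pc & SOFTIRQ_OFFSET)
                return "CPUTIME_SOFTIRQ";
        return "task/idle (not accounted here)";
}

int main(void)
{
        /* Hardirq entry interrupting a plain task: nothing charged here. */
        printf("%s\n", charge(HARDIRQ_OFFSET, HARDIRQ_OFFSET));
        /* Hardirq entry interrupting a served softirq: charge the softirq. */
        printf("%s\n", charge(HARDIRQ_OFFSET | SOFTIRQ_OFFSET, HARDIRQ_OFFSET));
        /* Hardirq exit: offset is 0, the count still carries HARDIRQ_OFFSET. */
        printf("%s\n", charge(HARDIRQ_OFFSET, 0));
        return 0;
}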

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20201202115732.27827-5-frederic@kernel.org
commit d3759e7184
parent 8a6a5920d3
Author:    Frederic Weisbecker <frederic@kernel.org>
Date:      2020-12-02 12:57:31 +01:00
Committer: Thomas Gleixner <tglx@linutronix.de>

4 changed files with 40 additions and 22 deletions

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -32,9 +32,9 @@ static __always_inline void rcu_irq_enter_check_tick(void)
  */
 #define __irq_enter()                                  \
         do {                                           \
-                account_irq_enter_time(current);       \
                 preempt_count_add(HARDIRQ_OFFSET);     \
                 lockdep_hardirq_enter();               \
+                account_hardirq_enter(current);        \
         } while (0)
 
 /*
@@ -62,8 +62,8 @@ void irq_enter_rcu(void);
  */
 #define __irq_exit()                                   \
         do {                                           \
+                account_hardirq_exit(current);         \
                 lockdep_hardirq_exit();                \
-                account_irq_exit_time(current);        \
                 preempt_count_sub(HARDIRQ_OFFSET);     \
         } while (0)

diff --git a/include/linux/vtime.h b/include/linux/vtime.h
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -83,32 +83,46 @@ static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-extern void vtime_account_irq(struct task_struct *tsk);
+extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
 extern void vtime_account_softirq(struct task_struct *tsk);
 extern void vtime_account_hardirq(struct task_struct *tsk);
 extern void vtime_flush(struct task_struct *tsk);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-static inline void vtime_account_irq(struct task_struct *tsk) { }
+static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
+static inline void vtime_account_softirq(struct task_struct *tsk) { }
+static inline void vtime_account_hardirq(struct task_struct *tsk) { }
 static inline void vtime_flush(struct task_struct *tsk) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
-extern void irqtime_account_irq(struct task_struct *tsk);
+extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
 #else
-static inline void irqtime_account_irq(struct task_struct *tsk) { }
+static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
 #endif
 
-static inline void account_irq_enter_time(struct task_struct *tsk)
+static inline void account_softirq_enter(struct task_struct *tsk)
 {
-        vtime_account_irq(tsk);
-        irqtime_account_irq(tsk);
+        vtime_account_irq(tsk, SOFTIRQ_OFFSET);
+        irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
 }
 
-static inline void account_irq_exit_time(struct task_struct *tsk)
+static inline void account_softirq_exit(struct task_struct *tsk)
 {
-        vtime_account_irq(tsk);
-        irqtime_account_irq(tsk);
+        vtime_account_softirq(tsk);
+        irqtime_account_irq(tsk, 0);
+}
+
+static inline void account_hardirq_enter(struct task_struct *tsk)
+{
+        vtime_account_irq(tsk, HARDIRQ_OFFSET);
+        irqtime_account_irq(tsk, HARDIRQ_OFFSET);
+}
+
+static inline void account_hardirq_exit(struct task_struct *tsk)
+{
+        vtime_account_hardirq(tsk);
+        irqtime_account_irq(tsk, 0);
 }
 
 #endif /* _LINUX_KERNEL_VTIME_H */

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -44,12 +44,13 @@ static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
 }
 
 /*
- * Called before incrementing preempt_count on {soft,}irq_enter
+ * Called after incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
  */
-void irqtime_account_irq(struct task_struct *curr)
+void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
 {
         struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
+        unsigned int pc;
         s64 delta;
         int cpu;
@@ -59,6 +60,7 @@ void irqtime_account_irq(struct task_struct *curr)
         cpu = smp_processor_id();
         delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
         irqtime->irq_start_time += delta;
+        pc = preempt_count() - offset;
 
         /*
          * We do not account for softirq time from ksoftirqd here.
@@ -66,9 +68,9 @@ void irqtime_account_irq(struct task_struct *curr)
          * in that case, so as not to confuse scheduler with a special task
          * that do not consume any time, but still wants to run.
          */
-        if (hardirq_count())
+        if (pc & HARDIRQ_MASK)
                 irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
-        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+        else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
                 irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 }
 
@@ -417,11 +419,13 @@ void vtime_task_switch(struct task_struct *prev)
 }
 # endif
 
-void vtime_account_irq(struct task_struct *tsk)
+void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
 {
-        if (hardirq_count()) {
+        unsigned int pc = preempt_count() - offset;
+
+        if (pc & HARDIRQ_OFFSET) {
                 vtime_account_hardirq(tsk);
-        } else if (in_serving_softirq()) {
+        } else if (pc & SOFTIRQ_OFFSET) {
                 vtime_account_softirq(tsk);
         } else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
                    is_idle_task(tsk)) {
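
To make the entry/exit asymmetry in the hunks above concrete, here is a
hypothetical, heavily simplified timeline (not kernel code): fake_clock,
irq_start_time and preempt_count below only mimic the per-CPU state, and
the constants repeat the layout assumed in the earlier sketch. At entry
the freshly added HARDIRQ_OFFSET is subtracted, so the elapsed time is
charged to the interrupted context; at exit the offset is 0, so the time
spent in the hardirq itself goes to CPUTIME_IRQ:

#include <stdio.h>

#define SOFTIRQ_OFFSET 0x00000100u
#define HARDIRQ_OFFSET 0x00010000u
#define HARDIRQ_MASK   0x000f0000u

static unsigned long long fake_clock;     /* stand-in for sched_clock_cpu() */
static unsigned long long irq_start_time; /* mimics irqtime->irq_start_time */
static unsigned int preempt_count;

static void account(unsigned int offset, const char *when)
{
        unsigned long long delta = fake_clock - irq_start_time;
        unsigned int pc = preempt_count - offset;

        irq_start_time += delta;
        if (pc & HARDIRQ_MASK)
                printf("%s: %llu ns -> CPUTIME_IRQ\n", when, delta);
        else if (pc & SOFTIRQ_OFFSET)
                printf("%s: %llu ns -> CPUTIME_SOFTIRQ\n", when, delta);
        else
                printf("%s: %llu ns -> left to task/idle accounting\n", when, delta);
}

int main(void)
{
        fake_clock = 1000;                        /* the task ran for 1000 ns    */
        preempt_count += HARDIRQ_OFFSET;          /* __irq_enter()               */
        account(HARDIRQ_OFFSET, "hardirq entry"); /* charges the interrupted ctx */

        fake_clock = 1500;                        /* 500 ns spent in the handler */
        account(0, "hardirq exit");               /* offset still in the count   */
        preempt_count -= HARDIRQ_OFFSET;          /* __irq_exit()                */
        return 0;
}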

diff --git a/kernel/softirq.c b/kernel/softirq.c
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -315,10 +315,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
         current->flags &= ~PF_MEMALLOC;
 
         pending = local_softirq_pending();
-        account_irq_enter_time(current);
 
         __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
         in_hardirq = lockdep_softirq_start();
+        account_softirq_enter(current);
 
 restart:
         /* Reset the pending bitmask before enabling irqs */
@@ -365,8 +365,8 @@ restart:
                 wakeup_softirqd();
         }
 
+        account_softirq_exit(current);
         lockdep_softirq_end(in_hardirq);
-        account_irq_exit_time(current);
         __local_bh_enable(SOFTIRQ_OFFSET);
         WARN_ON_ONCE(in_interrupt());
         current_restore_flags(old_flags, PF_MEMALLOC);
@@ -418,7 +418,7 @@ static inline void __irq_exit_rcu(void)
 #else
         lockdep_assert_irqs_disabled();
 #endif
-        account_irq_exit_time(current);
+        account_hardirq_exit(current);
         preempt_count_sub(HARDIRQ_OFFSET);
         if (!in_interrupt() && local_softirq_pending())
                 invoke_softirq();