Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-17 01:34:00 +08:00
commit 09223371de
Commit a26ac2455ffcf3 ("rcu: move TREE_RCU from softirq to kthread") introduced a performance regression. In an AIM7 test, it degraded performance by about 40%.

That commit runs RCU callbacks in a kthread instead of in softirq context, and we observed a high rate of context switching caused by this. Our test system has 64 CPUs and HZ is 1000, so we saw more than 64k context switches per second caused by RCU's per-CPU kthreads. A trace showed that most of the time the RCU per-CPU kthread doesn't actually handle any callbacks, but instead just does a very small amount of work handling grace periods. This means that RCU's per-CPU kthreads are making the scheduler do quite a bit of work in order to allow a very small amount of RCU-related processing to be done.

Alex Shi's analysis determined that this slowdown is due to lock contention within the scheduler. Unfortunately, as Peter Zijlstra points out, the scheduler's real-time semantics require global action, which means that this contention is inherent in real-time scheduling. (Yes, perhaps someone will come up with a workaround -- otherwise, -rt is not going to do well on large SMP systems -- but this patch will work around the issue in the meantime. And "the meantime" might well be forever.)

This patch therefore re-introduces softirq processing to RCU, but only for core RCU work. RCU callbacks are still executed in kthread context, so that only a small amount of RCU work runs in softirq context in the common case. This should minimize ksoftirqd execution, allowing us to skip boosting of ksoftirqd for CONFIG_RCU_BOOST=y kernels.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Tested-by: "Alex,Shi" <alex.shi@intel.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
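As a rough sketch of the split the patch describes (simplified, not the verbatim patch; the kthread wakeup helper below is a hypothetical stand-in): core grace-period work is cheap, so it is raised as a softirq, while callback invocation, which can run long, stays in the per-CPU kthread.

/* Sketch only: invoke_rcu_core() matches the patch's intent; the
 * wakeup helper is hypothetical. */
static void invoke_rcu_core(void)
{
        raise_softirq(RCU_SOFTIRQ);     /* cheap: no scheduler wakeup needed */
}

static void invoke_rcu_callbacks(void)
{
        wake_up_rcu_cpu_kthread();      /* hypothetical: wake the per-CPU
                                         * kthread only when callbacks are
                                         * actually pending */
}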
151 lines
3.4 KiB
C
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq

#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_H

#include <linux/tracepoint.h>

struct irqaction;
struct softirq_action;

#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val)                          \
        __print_symbolic(val,                           \
                         softirq_name(HI),              \
                         softirq_name(TIMER),           \
                         softirq_name(NET_TX),          \
                         softirq_name(NET_RX),          \
                         softirq_name(BLOCK),           \
                         softirq_name(BLOCK_IOPOLL),    \
                         softirq_name(TASKLET),         \
                         softirq_name(SCHED),           \
                         softirq_name(HRTIMER),         \
                         softirq_name(RCU))
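
/*
 * For reference: softirq_name() pairs each vector number with a
 * printable name via token pasting (##) and stringification (#), e.g.
 *
 *      softirq_name(TASKLET)  expands to  { TASKLET_SOFTIRQ, "TASKLET" }
 *
 * __print_symbolic() then maps the recorded vector value back to the
 * matching string when the trace buffer is formatted for output.
 */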

/**
 * irq_handler_entry - called immediately before the irq action handler
 * @irq: irq number
 * @action: pointer to struct irqaction
 *
 * The struct irqaction pointed to by @action contains various
 * information about the handler, including the device name,
 * @action->name, and the device id, @action->dev_id. When used in
 * conjunction with the irq_handler_exit tracepoint, we can figure
 * out irq handler latencies.
 */
TRACE_EVENT(irq_handler_entry,

        TP_PROTO(int irq, struct irqaction *action),

        TP_ARGS(irq, action),

        TP_STRUCT__entry(
                __field(        int,    irq             )
                __string(       name,   action->name    )
        ),

        TP_fast_assign(
                __entry->irq = irq;
                __assign_str(name, action->name);
        ),

        TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
);

/**
 * irq_handler_exit - called immediately after the irq action handler returns
 * @irq: irq number
 * @action: pointer to struct irqaction
 * @ret: return value
 *
 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
 * @action->handler successfully handled this irq. Otherwise, the irq might be
 * a shared irq line, or the irq was not handled successfully. Can be used in
 * conjunction with the irq_handler_entry to understand irq handler latencies.
 */
TRACE_EVENT(irq_handler_exit,

        TP_PROTO(int irq, struct irqaction *action, int ret),

        TP_ARGS(irq, action, ret),

        TP_STRUCT__entry(
                __field(        int,    irq     )
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                __entry->irq = irq;
                __entry->ret = ret;
        ),

        TP_printk("irq=%d ret=%s",
                  __entry->irq, __entry->ret ? "handled" : "unhandled")
);
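
/*
 * Call-site sketch (simplified, modeled on the generic IRQ dispatch in
 * kernel/irq/handle.c; not the verbatim kernel source): each action
 * handler is bracketed by the entry/exit pair, so the timestamp delta
 * gives the handler latency and @ret records whether it claimed the irq.
 */
static irqreturn_t example_dispatch_one(int irq, struct irqaction *action)
{
        irqreturn_t res;

        trace_irq_handler_entry(irq, action);
        res = action->handler(irq, action->dev_id);
        trace_irq_handler_exit(irq, action, res);
        return res;
}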

DECLARE_EVENT_CLASS(softirq,

        TP_PROTO(unsigned int vec_nr),

        TP_ARGS(vec_nr),

        TP_STRUCT__entry(
                __field(        unsigned int,   vec     )
        ),

        TP_fast_assign(
                __entry->vec = vec_nr;
        ),

        TP_printk("vec=%u [action=%s]", __entry->vec,
                  show_softirq_name(__entry->vec))
);

/**
 * softirq_entry - called immediately before the softirq handler
 * @vec_nr: softirq vector number
 *
 * When used in combination with the softirq_exit tracepoint
 * we can determine the softirq handler runtime.
 */
DEFINE_EVENT(softirq, softirq_entry,

        TP_PROTO(unsigned int vec_nr),

        TP_ARGS(vec_nr)
);

/**
 * softirq_exit - called immediately after the softirq handler returns
 * @vec_nr: softirq vector number
 *
 * When used in combination with the softirq_entry tracepoint
 * we can determine the softirq handler runtime.
 */
DEFINE_EVENT(softirq, softirq_exit,

        TP_PROTO(unsigned int vec_nr),

        TP_ARGS(vec_nr)
);
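
/*
 * Call-site sketch (simplified from the softirq dispatch loop in
 * kernel/softirq.c): the handler runs between the pair, so the exit
 * timestamp minus the entry timestamp is that vector's runtime.
 */
static void example_run_softirq(struct softirq_action *h, unsigned int vec_nr)
{
        trace_softirq_entry(vec_nr);
        h->action(h);
        trace_softirq_exit(vec_nr);
}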

/**
 * softirq_raise - called immediately when a softirq is raised
 * @vec_nr: softirq vector number
 *
 * When used in combination with the softirq_entry tracepoint
 * we can determine the softirq raise to run latency.
 */
DEFINE_EVENT(softirq, softirq_raise,

        TP_PROTO(unsigned int vec_nr),

        TP_ARGS(vec_nr)
);
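
/*
 * Raise-site sketch (modeled on __raise_softirq_irqoff() in
 * kernel/softirq.c of this era): the tracepoint fires as the pending
 * bit is set, so softirq_entry minus softirq_raise gives the
 * raise-to-run latency.
 */
void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}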

#endif /* _TRACE_IRQ_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
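
For consumers, these events show up under the irq: system in ftrace and perf (for example, irq:softirq_entry). In-kernel code can attach a probe through the register_trace_<name>() helpers that TRACE_EVENT generates; below is a minimal sketch of such a module (module and probe names are hypothetical):

#include <linux/module.h>
#include <trace/events/irq.h>

/* Probe signature: void (*)(void *data, <TP_PROTO arguments>); the
 * first argument is the private data pointer passed at registration. */
static void probe_softirq_entry(void *data, unsigned int vec_nr)
{
        pr_info("softirq entry: vec=%u\n", vec_nr);
}

static int __init softirq_watch_init(void)
{
        return register_trace_softirq_entry(probe_softirq_entry, NULL);
}

static void __exit softirq_watch_exit(void)
{
        unregister_trace_softirq_entry(probe_softirq_entry, NULL);
        tracepoint_synchronize_unregister();    /* wait for in-flight probes */
}

module_init(softirq_watch_init);
module_exit(softirq_watch_exit);
MODULE_LICENSE("GPL");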