perf: Use jump_labels to optimize the scheduler hooks
Trades a call + conditional + ret for an unconditional jmp.

Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101014203625.501657727@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8b92538d84
commit 82cd6def98
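The hooks changed below follow the usual jump-label shape: the scheduler-facing wrapper opens with a JUMP_LABEL() site that stays a no-op while perf_task_events is zero and is patched into a jump to the have_events: slow path once a task-bound event exists. As a rough illustration of that shape only, here is a small user-space sketch; it uses the generic fallback (an atomic load plus branch) in place of real runtime code patching, and every demo_* name is made up for the example rather than taken from the kernel.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_task_events;		/* stand-in for perf_task_events */

static void __demo_hook(void)			/* stand-in for __perf_event_task_sched_in() */
{
	printf("slow path: task events are active\n");
}

/*
 * Fallback form of a jump label: a plain load and branch around the slow
 * path.  With arch support, the load/branch becomes a nop that is patched
 * into an unconditional jmp when the key becomes non-zero.
 */
#define DEMO_JUMP_LABEL(key, label)		\
do {						\
	if (atomic_load(key))			\
		goto label;			\
} while (0)

static inline void demo_hook(void)		/* stand-in for perf_event_task_sched_in() */
{
	DEMO_JUMP_LABEL(&demo_task_events, have_events);
	return;					/* common case: fall straight through */

have_events:
	__demo_hook();
}

int main(void)
{
	demo_hook();				/* no events yet: nothing happens */
	atomic_fetch_add(&demo_task_events, 1);	/* analogue of jump_label_inc() */
	demo_hook();				/* now the slow path runs */
	return 0;
}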
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -487,6 +487,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -895,8 +896,30 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	JUMP_LABEL(&perf_task_events, have_events);
+	return;
+
+have_events:
+	__perf_event_task_sched_in(task);
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	JUMP_LABEL(&perf_task_events, have_events);
+	return;
+
+have_events:
+	__perf_event_task_sched_out(task, next);
+}
+
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -34,7 +34,7 @@
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -1311,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+				 struct task_struct *next)
 {
 	int ctxn;
 
@@ -1337,14 +1337,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	cpuctx->task_ctx = NULL;
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-	task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */
@@ -1494,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2216,7 +2208,8 @@ static void free_event(struct perf_event *event)
 	irq_work_sync(&event->pending);
 
 	if (!event->parent) {
-		atomic_dec(&nr_events);
+		if (event->attach_state & PERF_ATTACH_TASK)
+			jump_label_dec(&perf_task_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -5354,7 +5347,8 @@ done:
 	event->pmu = pmu;
 
 	if (!event->parent) {
-		atomic_inc(&nr_events);
+		if (event->attach_state & PERF_ATTACH_TASK)
+			jump_label_inc(&perf_task_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -5849,7 +5843,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * our context.
 	 */
 	child_ctx = child->perf_event_ctxp[ctxn];
-	__perf_event_task_sched_out(child_ctx);
+	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
 	 * Take the context lock here so that if find_get_context is
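The two hunks that swap atomic_inc()/atomic_dec() for jump_label_inc()/jump_label_dec() turn perf_task_events into a reference count on the scheduler hooks: the first PERF_ATTACH_TASK event enables the patched branch and the last one to go away disables it again. A minimal sketch of that reference-counted toggle, assuming plain C11 atomics with a printf() standing in for the real branch patching (all demo_* names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_key;			/* stand-in for perf_task_events */

static void demo_label_inc(atomic_int *key)
{
	/* First user: flip the hook site from nop to jmp. */
	if (atomic_fetch_add(key, 1) == 0)
		printf("patch hook site: nop -> jmp\n");
}

static void demo_label_dec(atomic_int *key)
{
	/* Last user gone: flip the hook site back to a nop. */
	if (atomic_fetch_sub(key, 1) == 1)
		printf("patch hook site: jmp -> nop\n");
}

int main(void)
{
	demo_label_inc(&demo_key);	/* first task-bound event created */
	demo_label_inc(&demo_key);	/* second event: no repatching needed */
	demo_label_dec(&demo_key);
	demo_label_dec(&demo_key);	/* last event freed: hooks go quiet */
	return 0;
}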