perf: Remove type specific target pointers
The only reason CQM had to use a hard-coded pmu type was so it could use
cqm_target in hw_perf_event.

Do away with the {tp,bp,cqm}_target pointers and provide a non type
specific one. This allows us to do away with that silly pmu type as well.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Vince Weaver <vince@deater.net>
Cc: acme@kernel.org
Cc: acme@redhat.com
Cc: hpa@zytor.com
Cc: jolsa@redhat.com
Cc: kanaka.d.juvva@intel.com
Cc: matt.fleming@intel.com
Cc: tglx@linutronix.de
Cc: torvalds@linux-foundation.org
Cc: vikas.shivappa@linux.intel.com
Link: http://lkml.kernel.org/r/20150305211019.GU21418@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4e16ed9941
commit 50f16a8bf9
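In short, the patch replaces the three type-specific task pointers ({tp,bp,cqm}_target) with a single hw.target field and drops PERF_TYPE_INTEL_CQM, so intel_cqm can register with a dynamic pmu type (-1). Below is a minimal, compilable sketch of the resulting shape, not the kernel's actual definition: the names hw_perf_event_sketch and sketch_event_target() are invented for illustration, and the real struct hw_perf_event carries many more members.

/*
 * Illustrative sketch only -- a stripped-down analogue of the kernel's
 * struct hw_perf_event after this patch.  The per-class members stay in
 * the union; only the *_target pointers move out into one shared field.
 */
struct task_struct;				/* opaque for this sketch */

struct hw_perf_event_sketch {
	union {
		struct {			/* tracepoint */
			void *tp_list;
		};
		struct {			/* breakpoint */
			void *bp_list;
		};
		struct {			/* intel_cqm */
			void *cqm_group_entry;
		};
	};
	struct task_struct *target;		/* one pointer for every event class */
};

/*
 * Code that previously had to know the event type just to find the task
 * (tp_target vs. bp_target vs. cqm_target) can now read a single field.
 */
static inline struct task_struct *
sketch_event_target(struct hw_perf_event_sketch *hwc)
{
	return hwc->target;
}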
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Per-cpu breakpoints are not supported by our stepping
 	 * mechanism.
 	 */
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		return -EINVAL;
 
 	/*
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Disallow per-task kernel breakpoints since these would
 	 * complicate the stepping code.
 	 */
-	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
 		return -EINVAL;
 
 	return 0;
@@ -263,7 +263,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 	/*
 	 * Events that target same task are placed into the same cache group.
 	 */
-	if (a->hw.cqm_target == b->hw.cqm_target)
+	if (a->hw.target == b->hw.target)
 		return true;
 
 	/*
@@ -279,7 +279,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.cqm_target);
+		return perf_cgroup_from_task(event->hw.target);
 
 	return event->cgrp;
 }
@@ -1365,8 +1365,7 @@ static int __init intel_cqm_init(void)
 
 	__perf_cpu_notifier(intel_cqm_cpu_notifier);
 
-	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm",
-				PERF_TYPE_INTEL_CQM);
+	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
 	if (ret)
 		pr_err("Intel CQM perf registration failed: %d\n", ret);
 	else
@@ -119,7 +119,6 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 		struct { /* tracepoint */
-			struct task_struct	*tp_target;
 			/* for tp_event->class */
 			struct list_head	tp_list;
 		};
@@ -129,7 +128,6 @@ struct hw_perf_event {
 			struct list_head	cqm_events_entry;
 			struct list_head	cqm_groups_entry;
 			struct list_head	cqm_group_entry;
-			struct task_struct	*cqm_target;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 		struct { /* breakpoint */
@@ -138,12 +136,12 @@ struct hw_perf_event {
 			 * problem hw_breakpoint has with context
 			 * creation and event initalization.
 			 */
-			struct task_struct		*bp_target;
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
 		};
 #endif
 	};
+	struct task_struct		*target;
 	int				state;
 	local64_t			prev_count;
 	u64				sample_period;
@@ -32,7 +32,6 @@ enum perf_type_id {
 	PERF_TYPE_HW_CACHE			= 3,
 	PERF_TYPE_RAW				= 4,
 	PERF_TYPE_BREAKPOINT			= 5,
-	PERF_TYPE_INTEL_CQM			= 6,
 
 	PERF_TYPE_MAX,				/* non-ABI */
 };
@@ -7171,18 +7171,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
-
-		if (attr->type == PERF_TYPE_TRACEPOINT)
-			event->hw.tp_target = task;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
 		/*
-		 * hw_breakpoint is a bit difficult here..
+		 * XXX pmu::event_init needs to know what task to account to
+		 * and we cannot use the ctx information because we need the
+		 * pmu before we get a ctx.
 		 */
-		else if (attr->type == PERF_TYPE_BREAKPOINT)
-			event->hw.bp_target = task;
-#endif
-		else if (attr->type == PERF_TYPE_INTEL_CQM)
-			event->hw.cqm_target = task;
+		event->hw.target = task;
 	}
 
 	if (!overflow_handler && parent_event) {
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
-	struct task_struct *tsk = bp->hw.bp_target;
+	struct task_struct *tsk = bp->hw.target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk &&
+		if (iter->hw.target == tsk &&
 		    find_slot_idx(iter) == type &&
 		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 	int nr;
 
 	nr = info->cpu_pinned;
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		nr += max_task_bp_pinned(cpu, type);
 	else
 		nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!bp->hw.bp_target) {
+	if (!bp->hw.target) {
 		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
 		return;
 	}
@@ -1005,7 +1005,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 		return true;
 
 	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
-		if (event->hw.tp_target->mm == mm)
+		if (event->hw.target->mm == mm)
 			return true;
 	}
 
@@ -1015,7 +1015,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 static inline bool
 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
 {
-	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
 }
 
 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
@@ -1023,10 +1023,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 	bool done;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		list_del(&event->hw.tp_list);
 		done = tu->filter.nr_systemwide ||
-			(event->hw.tp_target->flags & PF_EXITING) ||
+			(event->hw.target->flags & PF_EXITING) ||
 			uprobe_filter_event(tu, event);
 	} else {
 		tu->filter.nr_systemwide--;
@@ -1046,7 +1046,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 	int err;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		/*
 		 * event->parent != NULL means copy_process(), we can avoid
 		 * uprobe_apply(). current->mm must be probed and we can rely