perf/x86: Add is_visible attribute_group callback for base events
We don't need to pre-filter out unsupported base events; we can just use their group's is_visible callback to do this.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190512155518.21468-6-jolsa@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent baa0c83363
commit 3d5672735b
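The mechanism the patch switches to is the generic sysfs one: when an attribute_group is registered, the core walks group->attrs and asks the group's ->is_visible() callback, once per attribute and with that attribute's array index, whether to create the file; returning 0 hides it, returning the attribute's mode keeps it. Below is a minimal, hypothetical driver-style sketch of that pattern; only the attribute_group / is_visible shape matches the real API, every my_* name is invented for illustration.

/*
 * Hypothetical sketch: hide one sysfs file on hardware that lacks the
 * feature, instead of editing the attribute array up front.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static bool my_hw_has_feature_b;	/* would be set during probe */

static ssize_t my_feature_a_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(my_feature_a);

static ssize_t my_feature_b_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(my_feature_b);

static struct attribute *my_attrs[] = {
	&dev_attr_my_feature_a.attr,
	&dev_attr_my_feature_b.attr,
	NULL,
};

/* called once per entry of my_attrs[]; idx is the array index */
static umode_t my_is_visible(struct kobject *kobj,
			     struct attribute *attr, int idx)
{
	if (attr == &dev_attr_my_feature_b.attr && !my_hw_has_feature_b)
		return 0;		/* 0 == do not create this file */
	return attr->mode;		/* keep the file with its normal mode */
}

static const struct attribute_group my_group = {
	.name		= "features",
	.attrs		= my_attrs,
	.is_visible	= my_is_visible,
};

Registering such a group (for example with sysfs_create_group(), or via a device's ->groups) is then enough: files for unsupported features simply never appear. The patch below applies exactly this shape to the x86 PMU's "events" group.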
@@ -1618,42 +1618,6 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
 	.attrs = NULL,
 };
 
-/*
- * Remove all undefined events (x86_pmu.event_map(id) == 0)
- * out of events_attr attributes.
- */
-static void __init filter_events(struct attribute **attrs)
-{
-	struct device_attribute *d;
-	struct perf_pmu_events_attr *pmu_attr;
-	int offset = 0;
-	int i, j;
-
-	for (i = 0; attrs[i]; i++) {
-		d = (struct device_attribute *)attrs[i];
-		pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
-		/* str trumps id */
-		if (pmu_attr->event_str)
-			continue;
-		if (x86_pmu.event_map(i + offset))
-			continue;
-
-		for (j = i; attrs[j]; j++)
-			attrs[j] = attrs[j + 1];
-
-		/* Check the shifted attr. */
-		i--;
-
-		/*
-		 * event_map() is index based, the attrs array is organized
-		 * by increasing event index. If we shift the events, then
-		 * we need to compensate for the event_map(), otherwise
-		 * we are looking up the wrong event in the map
-		 */
-		offset++;
-	}
-}
-
 /* Merge two pointer arrays */
 __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
 {
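The removed helper filtered eagerly by compacting the NULL-terminated attrs[] array in place, which is why it needed the i--/offset bookkeeping: once an entry is shifted out, the array index no longer matches the event_map() index, so a separate offset has to keep the lookups aligned. A small userspace analogue of that same loop (toy names and values, plain C rather than kernel code, with the event_str shortcut dropped for brevity):

/*
 * Userspace analogue of the removed filter_events(): compact a
 * NULL-terminated array in place while keeping the map index aligned.
 */
#include <stdio.h>

/* stand-in for x86_pmu.event_map(): 0 means "not supported here" */
static int toy_event_map(int idx)
{
	static const int map[] = { 0x3c, 0, 0xc0, 0, 0x2e };
	return idx < 5 ? map[idx] : 0;
}

int main(void)
{
	/* stand-in for the NULL-terminated events_attr[] pointer array */
	const char *attrs[] = { "cycles", "stalled", "instructions",
				"stalled-be", "cache-refs", NULL };
	int offset = 0;
	int i, j;

	for (i = 0; attrs[i]; i++) {
		if (toy_event_map(i + offset))	/* supported: keep it */
			continue;

		for (j = i; attrs[j]; j++)	/* unsupported: shift tail left */
			attrs[j] = attrs[j + 1];

		i--;		/* re-check the entry that moved into slot i */
		offset++;	/* map index must keep advancing regardless */
	}

	for (i = 0; attrs[i]; i++)	/* prints: cycles, instructions, cache-refs */
		printf("%s\n", attrs[i]);
	return 0;
}

Running it prints cycles, instructions and cache-refs: the two entries whose toy map slot is 0 are squeezed out, and offset ends at 2, which is what kept the map lookups in step with the shrinking array. The next hunk replaces all of this with a per-entry visibility test.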
@@ -1744,9 +1708,24 @@ static struct attribute *events_attr[] = {
 	NULL,
 };
 
+/*
+ * Remove all undefined events (x86_pmu.event_map(id) == 0)
+ * out of events_attr attributes.
+ */
+static umode_t
+is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+	/* str trumps id */
+	return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
+}
+
 static struct attribute_group x86_pmu_events_group __ro_after_init = {
 	.name = "events",
 	.attrs = events_attr,
+	.is_visible = is_visible,
 };
 
 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
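With the callback in place the same policy becomes a pure per-entry predicate: sysfs hands is_visible() the attribute and its index in events_attr[], and the function returns either attr->mode (create the file) or 0 (skip it); an attribute that carries an event_str is kept regardless of the map ("str trumps id"). A userspace analogue of just that predicate over a toy event table (all names and values invented):

/*
 * Userspace analogue of the new is_visible() predicate: only the
 * decision logic is mirrored, everything else is a toy stand-in.
 */
#include <stdio.h>

#define TOY_MODE 0444			/* stands in for attr->mode */

struct toy_event_attr {
	const char *name;
	const char *event_str;		/* non-NULL string trumps the id lookup */
};

/* stand-in for x86_pmu.event_map(): 0 means "not supported here" */
static int toy_event_map(int idx)
{
	static const int map[] = { 0x3c, 0, 0xc0, 0, 0 };
	return idx < 5 ? map[idx] : 0;
}

/* mirrors: return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; */
static unsigned int toy_is_visible(const struct toy_event_attr *a, int idx)
{
	return (a->event_str || toy_event_map(idx)) ? TOY_MODE : 0;
}

int main(void)
{
	static const struct toy_event_attr attrs[] = {
		{ "cycles",	  NULL },
		{ "stalled",	  NULL },
		{ "instructions", NULL },
		{ "stalled-be",	  NULL },
		{ "ref-cycles",	  "event=0x00,umask=0x03" },	/* kept via event_str */
	};
	int i;

	for (i = 0; i < 5; i++)
		printf("%-14s %s\n", attrs[i].name,
		       toy_is_visible(&attrs[i], i) ? "visible" : "hidden");
	return 0;
}

On real hardware the entries that stay visible are what shows up in the PMU's events directory in sysfs (e.g. /sys/devices/cpu/events on a typical x86 box). The final hunk below then drops the now-unneeded filter_events() call site from init_hw_perf_events().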
@@ -1852,8 +1831,6 @@ static int __init init_hw_perf_events(void)
 
 	if (!x86_pmu.events_sysfs_show)
 		x86_pmu_events_group.attrs = &empty_attrs;
-	else
-		filter_events(x86_pmu_events_group.attrs);
 
 	if (x86_pmu.attrs) {
 		struct attribute **tmp;