Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "The main changes in this cycle on the kernel side were:

   - CPU PMU and uncore driver updates to Intel Snow Ridge, IceLake,
     KabyLake, AmberLake and WhiskeyLake CPUs.

   - Rework the MSR probing infrastructure to make it more robust, make
     it work better on virtualized systems and to better expose it on
     sysfs.

   - Rework PMU attributes group support based on the feedback from
     Greg. The core sysfs patch that adds sysfs_update_groups() was
     acked by Greg.

  There's a lot of perf tooling changes as well, all around the place:

   - vendor updates to Intel, cs-etm (ARM), ARM64, s390,

   - various enhancements to Intel PT tooling support:
      - Improve CBR (Core to Bus Ratio) packets support.
      - Export power and ptwrite events to sqlite and postgresql.
      - Add support for decoding PEBS via PT packets.
      - Add support for samples to contain IPC ratio, collecting cycles
        information from CYC packets, showing the IPC info periodically
      - Allow using time ranges

   - lots of updates to perf pmu, perf stat, perf trace, eBPF support,
     perf record, perf diff, etc. - please see the shortlog and Git log
     for details"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (252 commits)
  tools arch x86: Sync asm/cpufeatures.h with the with the kernel
  tools build: Check if gettid() is available before providing helper
  perf jvmti: Address gcc string overflow warning for strncpy()
  perf python: Remove -fstack-protector-strong if clang doesn't have it
  perf annotate TUI browser: Do not use member from variable within its own initialization
  perf tests: Fix record+probe_libc_inet_pton.sh for powerpc64
  perf evsel: Do not rely on errno values for precise_ip fallback
  perf thread: Allow references to thread objects after machine__exit()
  perf header: Assign proper ff->ph in perf_event__synthesize_features()
  tools arch kvm: Sync kvm headers with the kernel sources
  perf script: Allow specifying the files to process guest samples
  perf tools metric: Don't include duration_time in group
  perf list: Avoid extra : for --raw metrics
  perf vendor events intel: Metric fixes for SKX/CLX
  perf tools: Fix typos / broken sentences
  perf jevents: Add support for Hisi hip08 L3C PMU aliasing
  perf jevents: Add support for Hisi hip08 HHA PMU aliasing
  perf jevents: Add support for Hisi hip08 DDRC PMU aliasing
  perf pmu: Support more complex PMU event aliasing
  perf diff: Documentation -c cycles option
  ...
commit 608745f124
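The "attributes group support" rework mentioned above is what the diff below implements: open-coded merge_attr()/filter_events() array merging is replaced by .is_visible callbacks and per-PMU attr_update group lists. As a rough, illustrative sketch only (the example_* names and the feature_present flag are hypothetical, not taken from this commit), a PMU driver using that pattern looks roughly like this:

/*
 * Illustrative sketch, not code from this commit: publish an optional
 * sysfs "events" group via .is_visible + pmu->attr_update instead of
 * building merged attribute arrays by hand at init time.
 */
#include <linux/perf_event.h>
#include <linux/sysfs.h>

static bool feature_present;	/* hypothetical flag, set during probe */

static struct attribute *example_event_attrs[] = {
	/* EVENT_PTR(...) entries for the optional events would go here */
	NULL,
};

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int i)
{
	/* Hide every attribute in the group unless the feature exists. */
	return feature_present ? attr->mode : 0;
}

static const struct attribute_group example_event_group = {
	.name		= "events",
	.attrs		= example_event_attrs,
	.is_visible	= example_is_visible,
};

static const struct attribute_group *example_attr_update[] = {
	&example_event_group,
	NULL,
};

/*
 * At registration time the driver then only needs something like:
 *	pmu->attr_update = example_attr_update;
 * and the sysfs core decides per attribute whether to show it.
 */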
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += core.o
obj-y += core.o probe.o
obj-y += amd/
obj-$(CONFIG_X86_LOCAL_APIC) += msr.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel/
@@ -1618,68 +1618,6 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
.attrs = NULL,
};

/*
* Remove all undefined events (x86_pmu.event_map(id) == 0)
* out of events_attr attributes.
*/
static void __init filter_events(struct attribute **attrs)
{
struct device_attribute *d;
struct perf_pmu_events_attr *pmu_attr;
int offset = 0;
int i, j;

for (i = 0; attrs[i]; i++) {
d = (struct device_attribute *)attrs[i];
pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
/* str trumps id */
if (pmu_attr->event_str)
continue;
if (x86_pmu.event_map(i + offset))
continue;

for (j = i; attrs[j]; j++)
attrs[j] = attrs[j + 1];

/* Check the shifted attr. */
i--;

/*
* event_map() is index based, the attrs array is organized
* by increasing event index. If we shift the events, then
* we need to compensate for the event_map(), otherwise
* we are looking up the wrong event in the map
*/
offset++;
}
}

/* Merge two pointer arrays */
__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
{
struct attribute **new;
int j, i;

for (j = 0; a && a[j]; j++)
;
for (i = 0; b && b[i]; i++)
j++;
j++;

new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
if (!new)
return NULL;

j = 0;
for (i = 0; a && a[i]; i++)
new[j++] = a[i];
for (i = 0; b && b[i]; i++)
new[j++] = b[i];
new[j] = NULL;

return new;
}

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr = \
@@ -1744,9 +1682,24 @@ static struct attribute *events_attr[] = {
NULL,
};

/*
* Remove all undefined events (x86_pmu.event_map(id) == 0)
* out of events_attr attributes.
*/
static umode_t
is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct perf_pmu_events_attr *pmu_attr;

pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
/* str trumps id */
return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
}

static struct attribute_group x86_pmu_events_group __ro_after_init = {
.name = "events",
.attrs = events_attr,
.is_visible = is_visible,
};

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
@@ -1842,37 +1795,10 @@ static int __init init_hw_perf_events(void)

x86_pmu_format_group.attrs = x86_pmu.format_attrs;

if (x86_pmu.caps_attrs) {
struct attribute **tmp;

tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs);
if (!WARN_ON(!tmp))
x86_pmu_caps_group.attrs = tmp;
}

if (x86_pmu.event_attrs)
x86_pmu_events_group.attrs = x86_pmu.event_attrs;

if (!x86_pmu.events_sysfs_show)
x86_pmu_events_group.attrs = &empty_attrs;
else
filter_events(x86_pmu_events_group.attrs);

if (x86_pmu.cpu_events) {
struct attribute **tmp;

tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
if (!WARN_ON(!tmp))
x86_pmu_events_group.attrs = tmp;
}

if (x86_pmu.attrs) {
struct attribute **tmp;

tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs);
if (!WARN_ON(!tmp))
x86_pmu_attr_group.attrs = tmp;
}
pmu.attr_update = x86_pmu.attr_update;

pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
@ -20,6 +20,7 @@
|
||||
#include <asm/intel-family.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/hypervisor.h>
|
||||
|
||||
#include "../perf_event.h"
|
||||
|
||||
@ -3897,8 +3898,6 @@ static __initconst const struct x86_pmu core_pmu = {
|
||||
.check_period = intel_pmu_check_period,
|
||||
};
|
||||
|
||||
static struct attribute *intel_pmu_attrs[];
|
||||
|
||||
static __initconst const struct x86_pmu intel_pmu = {
|
||||
.name = "Intel",
|
||||
.handle_irq = intel_pmu_handle_irq,
|
||||
@ -3930,8 +3929,6 @@ static __initconst const struct x86_pmu intel_pmu = {
|
||||
.format_attrs = intel_arch3_formats_attr,
|
||||
.events_sysfs_show = intel_event_sysfs_show,
|
||||
|
||||
.attrs = intel_pmu_attrs,
|
||||
|
||||
.cpu_prepare = intel_pmu_cpu_prepare,
|
||||
.cpu_starting = intel_pmu_cpu_starting,
|
||||
.cpu_dying = intel_pmu_cpu_dying,
|
||||
@ -4054,6 +4051,13 @@ static bool check_msr(unsigned long msr, u64 mask)
|
||||
{
|
||||
u64 val_old, val_new, val_tmp;
|
||||
|
||||
/*
|
||||
* Disable the check for real HW, so we don't
|
||||
* mess with potentionaly enabled registers:
|
||||
*/
|
||||
if (hypervisor_is_type(X86_HYPER_NATIVE))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Read the current value, change it and read it back to see if it
|
||||
* matches, this is needed to detect certain hardware emulators
|
||||
@ -4274,13 +4278,6 @@ static struct attribute *icl_tsx_events_attrs[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static __init struct attribute **get_icl_events_attrs(void)
|
||||
{
|
||||
return boot_cpu_has(X86_FEATURE_RTM) ?
|
||||
merge_attr(icl_events_attrs, icl_tsx_events_attrs) :
|
||||
icl_events_attrs;
|
||||
}
|
||||
|
||||
static ssize_t freeze_on_smi_show(struct device *cdev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
@ -4402,43 +4399,111 @@ static DEVICE_ATTR(allow_tsx_force_abort, 0644,
|
||||
|
||||
static struct attribute *intel_pmu_attrs[] = {
|
||||
&dev_attr_freeze_on_smi.attr,
|
||||
NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
|
||||
&dev_attr_allow_tsx_force_abort.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static __init struct attribute **
|
||||
get_events_attrs(struct attribute **base,
|
||||
struct attribute **mem,
|
||||
struct attribute **tsx)
|
||||
static umode_t
|
||||
tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
|
||||
{
|
||||
struct attribute **attrs = base;
|
||||
struct attribute **old;
|
||||
|
||||
if (mem && x86_pmu.pebs)
|
||||
attrs = merge_attr(attrs, mem);
|
||||
|
||||
if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
|
||||
old = attrs;
|
||||
attrs = merge_attr(attrs, tsx);
|
||||
if (old != base)
|
||||
kfree(old);
|
||||
}
|
||||
|
||||
return attrs;
|
||||
return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
|
||||
}
|
||||
|
||||
static umode_t
|
||||
pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
|
||||
{
|
||||
return x86_pmu.pebs ? attr->mode : 0;
|
||||
}
|
||||
|
||||
static umode_t
|
||||
lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
|
||||
{
|
||||
return x86_pmu.lbr_nr ? attr->mode : 0;
|
||||
}
|
||||
|
||||
static umode_t
|
||||
exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
|
||||
{
|
||||
return x86_pmu.version >= 2 ? attr->mode : 0;
|
||||
}
|
||||
|
||||
static umode_t
|
||||
default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
|
||||
{
|
||||
if (attr == &dev_attr_allow_tsx_force_abort.attr)
|
||||
return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
|
||||
|
||||
return attr->mode;
|
||||
}
|
||||
|
||||
static struct attribute_group group_events_td = {
|
||||
.name = "events",
|
||||
};
|
||||
|
||||
static struct attribute_group group_events_mem = {
|
||||
.name = "events",
|
||||
.is_visible = pebs_is_visible,
|
||||
};
|
||||
|
||||
static struct attribute_group group_events_tsx = {
|
||||
.name = "events",
|
||||
.is_visible = tsx_is_visible,
|
||||
};
|
||||
|
||||
static struct attribute_group group_caps_gen = {
|
||||
.name = "caps",
|
||||
.attrs = intel_pmu_caps_attrs,
|
||||
};
|
||||
|
||||
static struct attribute_group group_caps_lbr = {
|
||||
.name = "caps",
|
||||
.attrs = lbr_attrs,
|
||||
.is_visible = lbr_is_visible,
|
||||
};
|
||||
|
||||
static struct attribute_group group_format_extra = {
|
||||
.name = "format",
|
||||
.is_visible = exra_is_visible,
|
||||
};
|
||||
|
||||
static struct attribute_group group_format_extra_skl = {
|
||||
.name = "format",
|
||||
.is_visible = exra_is_visible,
|
||||
};
|
||||
|
||||
static struct attribute_group group_default = {
|
||||
.attrs = intel_pmu_attrs,
|
||||
.is_visible = default_is_visible,
|
||||
};
|
||||
|
||||
static const struct attribute_group *attr_update[] = {
|
||||
&group_events_td,
|
||||
&group_events_mem,
|
||||
&group_events_tsx,
|
||||
&group_caps_gen,
|
||||
&group_caps_lbr,
|
||||
&group_format_extra,
|
||||
&group_format_extra_skl,
|
||||
&group_default,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *empty_attrs;
|
||||
|
||||
__init int intel_pmu_init(void)
|
||||
{
|
||||
struct attribute **extra_attr = NULL;
|
||||
struct attribute **mem_attr = NULL;
|
||||
struct attribute **tsx_attr = NULL;
|
||||
struct attribute **to_free = NULL;
|
||||
struct attribute **extra_skl_attr = &empty_attrs;
|
||||
struct attribute **extra_attr = &empty_attrs;
|
||||
struct attribute **td_attr = &empty_attrs;
|
||||
struct attribute **mem_attr = &empty_attrs;
|
||||
struct attribute **tsx_attr = &empty_attrs;
|
||||
union cpuid10_edx edx;
|
||||
union cpuid10_eax eax;
|
||||
union cpuid10_ebx ebx;
|
||||
struct event_constraint *c;
|
||||
unsigned int unused;
|
||||
struct extra_reg *er;
|
||||
bool pmem = false;
|
||||
int version, i;
|
||||
char *name;
|
||||
|
||||
@ -4596,7 +4661,7 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
|
||||
x86_pmu.extra_regs = intel_slm_extra_regs;
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.cpu_events = slm_events_attrs;
|
||||
td_attr = slm_events_attrs;
|
||||
extra_attr = slm_format_attr;
|
||||
pr_cont("Silvermont events, ");
|
||||
name = "silvermont";
|
||||
@ -4624,7 +4689,7 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.pebs_prec_dist = true;
|
||||
x86_pmu.lbr_pt_coexist = true;
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.cpu_events = glm_events_attrs;
|
||||
td_attr = glm_events_attrs;
|
||||
extra_attr = slm_format_attr;
|
||||
pr_cont("Goldmont events, ");
|
||||
name = "goldmont";
|
||||
@ -4651,7 +4716,7 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.flags |= PMU_FL_PEBS_ALL;
|
||||
x86_pmu.get_event_constraints = glp_get_event_constraints;
|
||||
x86_pmu.cpu_events = glm_events_attrs;
|
||||
td_attr = glm_events_attrs;
|
||||
/* Goldmont Plus has 4-wide pipeline */
|
||||
event_attr_td_total_slots_scale_glm.event_str = "4";
|
||||
extra_attr = slm_format_attr;
|
||||
@ -4740,7 +4805,7 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
|
||||
|
||||
x86_pmu.cpu_events = snb_events_attrs;
|
||||
td_attr = snb_events_attrs;
|
||||
mem_attr = snb_mem_events_attrs;
|
||||
|
||||
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
|
||||
@ -4781,7 +4846,7 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
|
||||
|
||||
x86_pmu.cpu_events = snb_events_attrs;
|
||||
td_attr = snb_events_attrs;
|
||||
mem_attr = snb_mem_events_attrs;
|
||||
|
||||
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
|
||||
@ -4818,10 +4883,10 @@ __init int intel_pmu_init(void)
|
||||
|
||||
x86_pmu.hw_config = hsw_hw_config;
|
||||
x86_pmu.get_event_constraints = hsw_get_event_constraints;
|
||||
x86_pmu.cpu_events = hsw_events_attrs;
|
||||
x86_pmu.lbr_double_abort = true;
|
||||
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
|
||||
hsw_format_attr : nhm_format_attr;
|
||||
td_attr = hsw_events_attrs;
|
||||
mem_attr = hsw_mem_events_attrs;
|
||||
tsx_attr = hsw_tsx_events_attrs;
|
||||
pr_cont("Haswell events, ");
|
||||
@ -4860,10 +4925,10 @@ __init int intel_pmu_init(void)
|
||||
|
||||
x86_pmu.hw_config = hsw_hw_config;
|
||||
x86_pmu.get_event_constraints = hsw_get_event_constraints;
|
||||
x86_pmu.cpu_events = hsw_events_attrs;
|
||||
x86_pmu.limit_period = bdw_limit_period;
|
||||
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
|
||||
hsw_format_attr : nhm_format_attr;
|
||||
td_attr = hsw_events_attrs;
|
||||
mem_attr = hsw_mem_events_attrs;
|
||||
tsx_attr = hsw_tsx_events_attrs;
|
||||
pr_cont("Broadwell events, ");
|
||||
@ -4890,9 +4955,10 @@ __init int intel_pmu_init(void)
|
||||
name = "knights-landing";
|
||||
break;
|
||||
|
||||
case INTEL_FAM6_SKYLAKE_X:
|
||||
pmem = true;
|
||||
case INTEL_FAM6_SKYLAKE_MOBILE:
|
||||
case INTEL_FAM6_SKYLAKE_DESKTOP:
|
||||
case INTEL_FAM6_SKYLAKE_X:
|
||||
case INTEL_FAM6_KABYLAKE_MOBILE:
|
||||
case INTEL_FAM6_KABYLAKE_DESKTOP:
|
||||
x86_add_quirk(intel_pebs_isolation_quirk);
|
||||
@ -4920,27 +4986,28 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.get_event_constraints = hsw_get_event_constraints;
|
||||
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
|
||||
hsw_format_attr : nhm_format_attr;
|
||||
extra_attr = merge_attr(extra_attr, skl_format_attr);
|
||||
to_free = extra_attr;
|
||||
x86_pmu.cpu_events = hsw_events_attrs;
|
||||
extra_skl_attr = skl_format_attr;
|
||||
td_attr = hsw_events_attrs;
|
||||
mem_attr = hsw_mem_events_attrs;
|
||||
tsx_attr = hsw_tsx_events_attrs;
|
||||
intel_pmu_pebs_data_source_skl(
|
||||
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
|
||||
intel_pmu_pebs_data_source_skl(pmem);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
|
||||
x86_pmu.flags |= PMU_FL_TFA;
|
||||
x86_pmu.get_event_constraints = tfa_get_event_constraints;
|
||||
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
|
||||
x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
|
||||
intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
|
||||
}
|
||||
|
||||
pr_cont("Skylake events, ");
|
||||
name = "skylake";
|
||||
break;
|
||||
|
||||
case INTEL_FAM6_ICELAKE_X:
|
||||
case INTEL_FAM6_ICELAKE_XEON_D:
|
||||
pmem = true;
|
||||
case INTEL_FAM6_ICELAKE_MOBILE:
|
||||
case INTEL_FAM6_ICELAKE_DESKTOP:
|
||||
x86_pmu.late_ack = true;
|
||||
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
|
||||
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
|
||||
@ -4959,11 +5026,12 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.get_event_constraints = icl_get_event_constraints;
|
||||
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
|
||||
hsw_format_attr : nhm_format_attr;
|
||||
extra_attr = merge_attr(extra_attr, skl_format_attr);
|
||||
x86_pmu.cpu_events = get_icl_events_attrs();
|
||||
extra_skl_attr = skl_format_attr;
|
||||
mem_attr = icl_events_attrs;
|
||||
tsx_attr = icl_tsx_events_attrs;
|
||||
x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
|
||||
x86_pmu.lbr_pt_coexist = true;
|
||||
intel_pmu_pebs_data_source_skl(false);
|
||||
intel_pmu_pebs_data_source_skl(pmem);
|
||||
pr_cont("Icelake events, ");
|
||||
name = "icelake";
|
||||
break;
|
||||
@ -4988,14 +5056,14 @@ __init int intel_pmu_init(void)
|
||||
|
||||
snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
|
||||
|
||||
if (version >= 2 && extra_attr) {
|
||||
x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
|
||||
extra_attr);
|
||||
WARN_ON(!x86_pmu.format_attrs);
|
||||
}
|
||||
|
||||
x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
|
||||
mem_attr, tsx_attr);
|
||||
group_events_td.attrs = td_attr;
|
||||
group_events_mem.attrs = mem_attr;
|
||||
group_events_tsx.attrs = tsx_attr;
|
||||
group_format_extra.attrs = extra_attr;
|
||||
group_format_extra_skl.attrs = extra_skl_attr;
|
||||
|
||||
x86_pmu.attr_update = attr_update;
|
||||
|
||||
if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
|
||||
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
|
||||
@ -5043,12 +5111,8 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.lbr_nr = 0;
|
||||
}
|
||||
|
||||
x86_pmu.caps_attrs = intel_pmu_caps_attrs;
|
||||
|
||||
if (x86_pmu.lbr_nr) {
|
||||
x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
|
||||
if (x86_pmu.lbr_nr)
|
||||
pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Access extra MSR may cause #GP under certain circumstances.
|
||||
@ -5078,7 +5142,6 @@ __init int intel_pmu_init(void)
|
||||
if (x86_pmu.counter_freezing)
|
||||
x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
|
||||
|
||||
kfree(to_free);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -96,6 +96,7 @@
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include "../perf_event.h"
|
||||
#include "../probe.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
@ -144,25 +145,42 @@ enum perf_cstate_core_events {
|
||||
PERF_CSTATE_CORE_EVENT_MAX,
|
||||
};
|
||||
|
||||
PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
|
||||
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
|
||||
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
|
||||
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
|
||||
PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
|
||||
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
|
||||
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
|
||||
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");
|
||||
|
||||
static struct perf_cstate_msr core_msr[] = {
|
||||
[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES, &evattr_cstate_core_c1 },
|
||||
[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &evattr_cstate_core_c3 },
|
||||
[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &evattr_cstate_core_c6 },
|
||||
[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &evattr_cstate_core_c7 },
|
||||
static unsigned long core_msr_mask;
|
||||
|
||||
PMU_EVENT_GROUP(events, cstate_core_c1);
|
||||
PMU_EVENT_GROUP(events, cstate_core_c3);
|
||||
PMU_EVENT_GROUP(events, cstate_core_c6);
|
||||
PMU_EVENT_GROUP(events, cstate_core_c7);
|
||||
|
||||
static bool test_msr(int idx, void *data)
|
||||
{
|
||||
return test_bit(idx, (unsigned long *) data);
|
||||
}
|
||||
|
||||
static struct perf_msr core_msr[] = {
|
||||
[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES, &group_cstate_core_c1, test_msr },
|
||||
[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &group_cstate_core_c3, test_msr },
|
||||
[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &group_cstate_core_c6, test_msr },
|
||||
[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &group_cstate_core_c7, test_msr },
|
||||
};
|
||||
|
||||
static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
|
||||
static struct attribute *attrs_empty[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
/*
|
||||
* There are no default events, but we need to create
|
||||
* "events" group (with empty attrs) before updating
|
||||
* it with detected events.
|
||||
*/
|
||||
static struct attribute_group core_events_attr_group = {
|
||||
.name = "events",
|
||||
.attrs = core_events_attrs,
|
||||
.attrs = attrs_empty,
|
||||
};
|
||||
|
||||
DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
|
||||
@ -211,31 +229,37 @@ enum perf_cstate_pkg_events {
|
||||
PERF_CSTATE_PKG_EVENT_MAX,
|
||||
};
|
||||
|
||||
PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
|
||||
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
|
||||
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
|
||||
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
|
||||
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
|
||||
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
|
||||
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
|
||||
PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
|
||||
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
|
||||
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
|
||||
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
|
||||
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
|
||||
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
|
||||
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");
|
||||
|
||||
static struct perf_cstate_msr pkg_msr[] = {
|
||||
[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY, &evattr_cstate_pkg_c2 },
|
||||
[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY, &evattr_cstate_pkg_c3 },
|
||||
[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY, &evattr_cstate_pkg_c6 },
|
||||
[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY, &evattr_cstate_pkg_c7 },
|
||||
[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY, &evattr_cstate_pkg_c8 },
|
||||
[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY, &evattr_cstate_pkg_c9 },
|
||||
[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &evattr_cstate_pkg_c10 },
|
||||
};
|
||||
static unsigned long pkg_msr_mask;
|
||||
|
||||
static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
|
||||
NULL,
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c2);
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c3);
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c6);
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c7);
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c8);
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c9);
|
||||
PMU_EVENT_GROUP(events, cstate_pkg_c10);
|
||||
|
||||
static struct perf_msr pkg_msr[] = {
|
||||
[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY, &group_cstate_pkg_c2, test_msr },
|
||||
[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY, &group_cstate_pkg_c3, test_msr },
|
||||
[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY, &group_cstate_pkg_c6, test_msr },
|
||||
[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY, &group_cstate_pkg_c7, test_msr },
|
||||
[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY, &group_cstate_pkg_c8, test_msr },
|
||||
[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY, &group_cstate_pkg_c9, test_msr },
|
||||
[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr },
|
||||
};
|
||||
|
||||
static struct attribute_group pkg_events_attr_group = {
|
||||
.name = "events",
|
||||
.attrs = pkg_events_attrs,
|
||||
.attrs = attrs_empty,
|
||||
};
|
||||
|
||||
DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
|
||||
@ -289,7 +313,8 @@ static int cstate_pmu_event_init(struct perf_event *event)
|
||||
if (event->pmu == &cstate_core_pmu) {
|
||||
if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
|
||||
return -EINVAL;
|
||||
if (!core_msr[cfg].attr)
|
||||
cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
|
||||
if (!(core_msr_mask & (1 << cfg)))
|
||||
return -EINVAL;
|
||||
event->hw.event_base = core_msr[cfg].msr;
|
||||
cpu = cpumask_any_and(&cstate_core_cpu_mask,
|
||||
@ -298,7 +323,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
|
||||
if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
|
||||
return -EINVAL;
|
||||
cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
|
||||
if (!pkg_msr[cfg].attr)
|
||||
if (!(pkg_msr_mask & (1 << cfg)))
|
||||
return -EINVAL;
|
||||
event->hw.event_base = pkg_msr[cfg].msr;
|
||||
cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
|
||||
@ -421,8 +446,28 @@ static int cstate_cpu_init(unsigned int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct attribute_group *core_attr_update[] = {
|
||||
&group_cstate_core_c1,
|
||||
&group_cstate_core_c3,
|
||||
&group_cstate_core_c6,
|
||||
&group_cstate_core_c7,
|
||||
NULL,
|
||||
};
|
||||
|
||||
const struct attribute_group *pkg_attr_update[] = {
|
||||
&group_cstate_pkg_c2,
|
||||
&group_cstate_pkg_c3,
|
||||
&group_cstate_pkg_c6,
|
||||
&group_cstate_pkg_c7,
|
||||
&group_cstate_pkg_c8,
|
||||
&group_cstate_pkg_c9,
|
||||
&group_cstate_pkg_c10,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct pmu cstate_core_pmu = {
|
||||
.attr_groups = core_attr_groups,
|
||||
.attr_update = core_attr_update,
|
||||
.name = "cstate_core",
|
||||
.task_ctx_nr = perf_invalid_context,
|
||||
.event_init = cstate_pmu_event_init,
|
||||
@ -437,6 +482,7 @@ static struct pmu cstate_core_pmu = {
|
||||
|
||||
static struct pmu cstate_pkg_pmu = {
|
||||
.attr_groups = pkg_attr_groups,
|
||||
.attr_update = pkg_attr_update,
|
||||
.name = "cstate_pkg",
|
||||
.task_ctx_nr = perf_invalid_context,
|
||||
.event_init = cstate_pmu_event_init,
|
||||
@ -580,35 +626,11 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
|
||||
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_DESKTOP, snb_cstates),
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
|
||||
|
||||
/*
|
||||
* Probe the cstate events and insert the available one into sysfs attrs
|
||||
* Return false if there are no available events.
|
||||
*/
|
||||
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
|
||||
struct perf_cstate_msr *msr,
|
||||
struct attribute **attrs)
|
||||
{
|
||||
bool found = false;
|
||||
unsigned int bit;
|
||||
u64 val;
|
||||
|
||||
for (bit = 0; bit < max; bit++) {
|
||||
if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
|
||||
*attrs++ = &msr[bit].attr->attr.attr;
|
||||
found = true;
|
||||
} else {
|
||||
msr[bit].attr = NULL;
|
||||
}
|
||||
}
|
||||
*attrs = NULL;
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
static int __init cstate_probe(const struct cstate_model *cm)
|
||||
{
|
||||
/* SLM has different MSR for PKG C6 */
|
||||
@ -620,13 +642,14 @@ static int __init cstate_probe(const struct cstate_model *cm)
|
||||
pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
|
||||
|
||||
|
||||
has_cstate_core = cstate_probe_msr(cm->core_events,
|
||||
PERF_CSTATE_CORE_EVENT_MAX,
|
||||
core_msr, core_events_attrs);
|
||||
core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
|
||||
true, (void *) &cm->core_events);
|
||||
|
||||
has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
|
||||
PERF_CSTATE_PKG_EVENT_MAX,
|
||||
pkg_msr, pkg_events_attrs);
|
||||
pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
|
||||
true, (void *) &cm->pkg_events);
|
||||
|
||||
has_cstate_core = !!core_msr_mask;
|
||||
has_cstate_pkg = !!pkg_msr_mask;
|
||||
|
||||
return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
|
||||
}
|
||||
|
@ -55,27 +55,28 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include "../perf_event.h"
|
||||
#include "../probe.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/*
|
||||
* RAPL energy status counters
|
||||
*/
|
||||
#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */
|
||||
#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */
|
||||
#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */
|
||||
#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */
|
||||
#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */
|
||||
#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */
|
||||
#define RAPL_IDX_PP1_NRG_STAT 3 /* gpu */
|
||||
#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */
|
||||
#define RAPL_IDX_PSYS_NRG_STAT 4 /* psys */
|
||||
#define INTEL_RAPL_PSYS 0x5 /* pseudo-encoding */
|
||||
enum perf_rapl_events {
|
||||
PERF_RAPL_PP0 = 0, /* all cores */
|
||||
PERF_RAPL_PKG, /* entire package */
|
||||
PERF_RAPL_RAM, /* DRAM */
|
||||
PERF_RAPL_PP1, /* gpu */
|
||||
PERF_RAPL_PSYS, /* psys */
|
||||
|
||||
PERF_RAPL_MAX,
|
||||
NR_RAPL_DOMAINS = PERF_RAPL_MAX,
|
||||
};
|
||||
|
||||
#define NR_RAPL_DOMAINS 0x5
|
||||
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
|
||||
"pp0-core",
|
||||
"package",
|
||||
@ -84,33 +85,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
|
||||
"psys",
|
||||
};
|
||||
|
||||
/* Clients have PP0, PKG */
|
||||
#define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
|
||||
1<<RAPL_IDX_PKG_NRG_STAT|\
|
||||
1<<RAPL_IDX_PP1_NRG_STAT)
|
||||
|
||||
/* Servers have PP0, PKG, RAM */
|
||||
#define RAPL_IDX_SRV (1<<RAPL_IDX_PP0_NRG_STAT|\
|
||||
1<<RAPL_IDX_PKG_NRG_STAT|\
|
||||
1<<RAPL_IDX_RAM_NRG_STAT)
|
||||
|
||||
/* Servers have PP0, PKG, RAM, PP1 */
|
||||
#define RAPL_IDX_HSW (1<<RAPL_IDX_PP0_NRG_STAT|\
|
||||
1<<RAPL_IDX_PKG_NRG_STAT|\
|
||||
1<<RAPL_IDX_RAM_NRG_STAT|\
|
||||
1<<RAPL_IDX_PP1_NRG_STAT)
|
||||
|
||||
/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
|
||||
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
|
||||
1<<RAPL_IDX_PKG_NRG_STAT|\
|
||||
1<<RAPL_IDX_RAM_NRG_STAT|\
|
||||
1<<RAPL_IDX_PP1_NRG_STAT|\
|
||||
1<<RAPL_IDX_PSYS_NRG_STAT)
|
||||
|
||||
/* Knights Landing has PKG, RAM */
|
||||
#define RAPL_IDX_KNL (1<<RAPL_IDX_PKG_NRG_STAT|\
|
||||
1<<RAPL_IDX_RAM_NRG_STAT)
|
||||
|
||||
/*
|
||||
* event code: LSB 8 bits, passed in attr->config
|
||||
* any other bit is reserved
|
||||
@ -153,12 +127,18 @@ struct rapl_pmus {
|
||||
struct rapl_pmu *pmus[];
|
||||
};
|
||||
|
||||
struct rapl_model {
|
||||
unsigned long events;
|
||||
bool apply_quirk;
|
||||
};
|
||||
|
||||
/* 1/2^hw_unit Joule */
|
||||
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
|
||||
static struct rapl_pmus *rapl_pmus;
|
||||
static cpumask_t rapl_cpu_mask;
|
||||
static unsigned int rapl_cntr_mask;
|
||||
static u64 rapl_timer_ms;
|
||||
static struct perf_msr rapl_msrs[];
|
||||
|
||||
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
|
||||
{
|
||||
@ -350,7 +330,7 @@ static void rapl_pmu_event_del(struct perf_event *event, int flags)
|
||||
static int rapl_pmu_event_init(struct perf_event *event)
|
||||
{
|
||||
u64 cfg = event->attr.config & RAPL_EVENT_MASK;
|
||||
int bit, msr, ret = 0;
|
||||
int bit, ret = 0;
|
||||
struct rapl_pmu *pmu;
|
||||
|
||||
/* only look at RAPL events */
|
||||
@ -366,33 +346,12 @@ static int rapl_pmu_event_init(struct perf_event *event)
|
||||
|
||||
event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
|
||||
|
||||
/*
|
||||
* check event is known (determines counter)
|
||||
*/
|
||||
switch (cfg) {
|
||||
case INTEL_RAPL_PP0:
|
||||
bit = RAPL_IDX_PP0_NRG_STAT;
|
||||
msr = MSR_PP0_ENERGY_STATUS;
|
||||
break;
|
||||
case INTEL_RAPL_PKG:
|
||||
bit = RAPL_IDX_PKG_NRG_STAT;
|
||||
msr = MSR_PKG_ENERGY_STATUS;
|
||||
break;
|
||||
case INTEL_RAPL_RAM:
|
||||
bit = RAPL_IDX_RAM_NRG_STAT;
|
||||
msr = MSR_DRAM_ENERGY_STATUS;
|
||||
break;
|
||||
case INTEL_RAPL_PP1:
|
||||
bit = RAPL_IDX_PP1_NRG_STAT;
|
||||
msr = MSR_PP1_ENERGY_STATUS;
|
||||
break;
|
||||
case INTEL_RAPL_PSYS:
|
||||
bit = RAPL_IDX_PSYS_NRG_STAT;
|
||||
msr = MSR_PLATFORM_ENERGY_STATUS;
|
||||
break;
|
||||
default:
|
||||
if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cfg = array_index_nospec((long)cfg, NR_RAPL_DOMAINS + 1);
|
||||
bit = cfg - 1;
|
||||
|
||||
/* check event supported */
|
||||
if (!(rapl_cntr_mask & (1 << bit)))
|
||||
return -EINVAL;
|
||||
@ -407,7 +366,7 @@ static int rapl_pmu_event_init(struct perf_event *event)
|
||||
return -EINVAL;
|
||||
event->cpu = pmu->cpu;
|
||||
event->pmu_private = pmu;
|
||||
event->hw.event_base = msr;
|
||||
event->hw.event_base = rapl_msrs[bit].msr;
|
||||
event->hw.config = cfg;
|
||||
event->hw.idx = bit;
|
||||
|
||||
@ -457,90 +416,18 @@ RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890
|
||||
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
|
||||
RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");
|
||||
|
||||
static struct attribute *rapl_events_srv_attr[] = {
|
||||
EVENT_PTR(rapl_cores),
|
||||
EVENT_PTR(rapl_pkg),
|
||||
EVENT_PTR(rapl_ram),
|
||||
|
||||
EVENT_PTR(rapl_cores_unit),
|
||||
EVENT_PTR(rapl_pkg_unit),
|
||||
EVENT_PTR(rapl_ram_unit),
|
||||
|
||||
EVENT_PTR(rapl_cores_scale),
|
||||
EVENT_PTR(rapl_pkg_scale),
|
||||
EVENT_PTR(rapl_ram_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_cln_attr[] = {
|
||||
EVENT_PTR(rapl_cores),
|
||||
EVENT_PTR(rapl_pkg),
|
||||
EVENT_PTR(rapl_gpu),
|
||||
|
||||
EVENT_PTR(rapl_cores_unit),
|
||||
EVENT_PTR(rapl_pkg_unit),
|
||||
EVENT_PTR(rapl_gpu_unit),
|
||||
|
||||
EVENT_PTR(rapl_cores_scale),
|
||||
EVENT_PTR(rapl_pkg_scale),
|
||||
EVENT_PTR(rapl_gpu_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_hsw_attr[] = {
|
||||
EVENT_PTR(rapl_cores),
|
||||
EVENT_PTR(rapl_pkg),
|
||||
EVENT_PTR(rapl_gpu),
|
||||
EVENT_PTR(rapl_ram),
|
||||
|
||||
EVENT_PTR(rapl_cores_unit),
|
||||
EVENT_PTR(rapl_pkg_unit),
|
||||
EVENT_PTR(rapl_gpu_unit),
|
||||
EVENT_PTR(rapl_ram_unit),
|
||||
|
||||
EVENT_PTR(rapl_cores_scale),
|
||||
EVENT_PTR(rapl_pkg_scale),
|
||||
EVENT_PTR(rapl_gpu_scale),
|
||||
EVENT_PTR(rapl_ram_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_skl_attr[] = {
|
||||
EVENT_PTR(rapl_cores),
|
||||
EVENT_PTR(rapl_pkg),
|
||||
EVENT_PTR(rapl_gpu),
|
||||
EVENT_PTR(rapl_ram),
|
||||
EVENT_PTR(rapl_psys),
|
||||
|
||||
EVENT_PTR(rapl_cores_unit),
|
||||
EVENT_PTR(rapl_pkg_unit),
|
||||
EVENT_PTR(rapl_gpu_unit),
|
||||
EVENT_PTR(rapl_ram_unit),
|
||||
EVENT_PTR(rapl_psys_unit),
|
||||
|
||||
EVENT_PTR(rapl_cores_scale),
|
||||
EVENT_PTR(rapl_pkg_scale),
|
||||
EVENT_PTR(rapl_gpu_scale),
|
||||
EVENT_PTR(rapl_ram_scale),
|
||||
EVENT_PTR(rapl_psys_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_knl_attr[] = {
|
||||
EVENT_PTR(rapl_pkg),
|
||||
EVENT_PTR(rapl_ram),
|
||||
|
||||
EVENT_PTR(rapl_pkg_unit),
|
||||
EVENT_PTR(rapl_ram_unit),
|
||||
|
||||
EVENT_PTR(rapl_pkg_scale),
|
||||
EVENT_PTR(rapl_ram_scale),
|
||||
/*
|
||||
* There are no default events, but we need to create
|
||||
* "events" group (with empty attrs) before updating
|
||||
* it with detected events.
|
||||
*/
|
||||
static struct attribute *attrs_empty[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group rapl_pmu_events_group = {
|
||||
.name = "events",
|
||||
.attrs = NULL, /* patched at runtime */
|
||||
.attrs = attrs_empty,
|
||||
};
|
||||
|
||||
DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
|
||||
@ -561,6 +448,79 @@ static const struct attribute_group *rapl_attr_groups[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_cores[] = {
|
||||
EVENT_PTR(rapl_cores),
|
||||
EVENT_PTR(rapl_cores_unit),
|
||||
EVENT_PTR(rapl_cores_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group rapl_events_cores_group = {
|
||||
.name = "events",
|
||||
.attrs = rapl_events_cores,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_pkg[] = {
|
||||
EVENT_PTR(rapl_pkg),
|
||||
EVENT_PTR(rapl_pkg_unit),
|
||||
EVENT_PTR(rapl_pkg_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group rapl_events_pkg_group = {
|
||||
.name = "events",
|
||||
.attrs = rapl_events_pkg,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_ram[] = {
|
||||
EVENT_PTR(rapl_ram),
|
||||
EVENT_PTR(rapl_ram_unit),
|
||||
EVENT_PTR(rapl_ram_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group rapl_events_ram_group = {
|
||||
.name = "events",
|
||||
.attrs = rapl_events_ram,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_gpu[] = {
|
||||
EVENT_PTR(rapl_gpu),
|
||||
EVENT_PTR(rapl_gpu_unit),
|
||||
EVENT_PTR(rapl_gpu_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group rapl_events_gpu_group = {
|
||||
.name = "events",
|
||||
.attrs = rapl_events_gpu,
|
||||
};
|
||||
|
||||
static struct attribute *rapl_events_psys[] = {
|
||||
EVENT_PTR(rapl_psys),
|
||||
EVENT_PTR(rapl_psys_unit),
|
||||
EVENT_PTR(rapl_psys_scale),
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group rapl_events_psys_group = {
|
||||
.name = "events",
|
||||
.attrs = rapl_events_psys,
|
||||
};
|
||||
|
||||
static bool test_msr(int idx, void *data)
|
||||
{
|
||||
return test_bit(idx, (unsigned long *) data);
|
||||
}
|
||||
|
||||
static struct perf_msr rapl_msrs[] = {
|
||||
[PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr },
|
||||
[PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
|
||||
[PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr },
|
||||
[PERF_RAPL_PP1] = { MSR_PP1_ENERGY_STATUS, &rapl_events_gpu_group, test_msr },
|
||||
[PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr },
|
||||
};
|
||||
|
||||
static int rapl_cpu_offline(unsigned int cpu)
|
||||
{
|
||||
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
|
||||
@ -633,7 +593,7 @@ static int rapl_check_hw_unit(bool apply_quirk)
|
||||
* of 2. Datasheet, September 2014, Reference Number: 330784-001 "
|
||||
*/
|
||||
if (apply_quirk)
|
||||
rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
|
||||
rapl_hw_unit[PERF_RAPL_RAM] = 16;
|
||||
|
||||
/*
|
||||
* Calculate the timer rate:
|
||||
@ -674,6 +634,15 @@ static void cleanup_rapl_pmus(void)
|
||||
kfree(rapl_pmus);
|
||||
}
|
||||
|
||||
const struct attribute_group *rapl_attr_update[] = {
|
||||
&rapl_events_cores_group,
|
||||
&rapl_events_pkg_group,
|
||||
&rapl_events_ram_group,
|
||||
&rapl_events_gpu_group,
|
||||
&rapl_events_gpu_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int __init init_rapl_pmus(void)
|
||||
{
|
||||
int maxdie = topology_max_packages() * topology_max_die_per_package();
|
||||
@ -686,6 +655,7 @@ static int __init init_rapl_pmus(void)
|
||||
|
||||
rapl_pmus->maxdie = maxdie;
|
||||
rapl_pmus->pmu.attr_groups = rapl_attr_groups;
|
||||
rapl_pmus->pmu.attr_update = rapl_attr_update;
|
||||
rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
|
||||
rapl_pmus->pmu.event_init = rapl_pmu_event_init;
|
||||
rapl_pmus->pmu.add = rapl_pmu_event_add;
|
||||
@ -701,105 +671,96 @@ static int __init init_rapl_pmus(void)
|
||||
#define X86_RAPL_MODEL_MATCH(model, init) \
|
||||
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
|
||||
|
||||
struct intel_rapl_init_fun {
|
||||
bool apply_quirk;
|
||||
int cntr_mask;
|
||||
struct attribute **attrs;
|
||||
static struct rapl_model model_snb = {
|
||||
.events = BIT(PERF_RAPL_PP0) |
|
||||
BIT(PERF_RAPL_PKG) |
|
||||
BIT(PERF_RAPL_PP1),
|
||||
.apply_quirk = false,
|
||||
};
|
||||
|
||||
static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
|
||||
.apply_quirk = false,
|
||||
.cntr_mask = RAPL_IDX_CLN,
|
||||
.attrs = rapl_events_cln_attr,
|
||||
static struct rapl_model model_snbep = {
|
||||
.events = BIT(PERF_RAPL_PP0) |
|
||||
BIT(PERF_RAPL_PKG) |
|
||||
BIT(PERF_RAPL_RAM),
|
||||
.apply_quirk = false,
|
||||
};
|
||||
|
||||
static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
|
||||
.apply_quirk = true,
|
||||
.cntr_mask = RAPL_IDX_SRV,
|
||||
.attrs = rapl_events_srv_attr,
|
||||
static struct rapl_model model_hsw = {
|
||||
.events = BIT(PERF_RAPL_PP0) |
|
||||
BIT(PERF_RAPL_PKG) |
|
||||
BIT(PERF_RAPL_RAM) |
|
||||
BIT(PERF_RAPL_PP1),
|
||||
.apply_quirk = false,
|
||||
};
|
||||
|
||||
static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
|
||||
.apply_quirk = false,
|
||||
.cntr_mask = RAPL_IDX_HSW,
|
||||
.attrs = rapl_events_hsw_attr,
|
||||
static struct rapl_model model_hsx = {
|
||||
.events = BIT(PERF_RAPL_PP0) |
|
||||
BIT(PERF_RAPL_PKG) |
|
||||
BIT(PERF_RAPL_RAM),
|
||||
.apply_quirk = true,
|
||||
};
|
||||
|
||||
static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
|
||||
.apply_quirk = false,
|
||||
.cntr_mask = RAPL_IDX_SRV,
|
||||
.attrs = rapl_events_srv_attr,
|
||||
static struct rapl_model model_knl = {
|
||||
.events = BIT(PERF_RAPL_PKG) |
|
||||
BIT(PERF_RAPL_RAM),
|
||||
.apply_quirk = true,
|
||||
};
|
||||
|
||||
static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
|
||||
.apply_quirk = true,
|
||||
.cntr_mask = RAPL_IDX_KNL,
|
||||
.attrs = rapl_events_knl_attr,
|
||||
static struct rapl_model model_skl = {
|
||||
.events = BIT(PERF_RAPL_PP0) |
|
||||
BIT(PERF_RAPL_PKG) |
|
||||
BIT(PERF_RAPL_RAM) |
|
||||
BIT(PERF_RAPL_PP1) |
|
||||
BIT(PERF_RAPL_PSYS),
|
||||
.apply_quirk = false,
|
||||
};
|
||||
|
||||
static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
|
||||
.apply_quirk = false,
|
||||
.cntr_mask = RAPL_IDX_SKL_CLN,
|
||||
.attrs = rapl_events_skl_attr,
|
||||
};
|
||||
|
||||
static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, snb_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsx_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, hsx_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
|
||||
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
|
||||
static const struct x86_cpu_id rapl_model_match[] __initconst = {
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, model_snb),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, model_snbep),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, model_snb),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, model_snbep),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, model_hsx),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, model_hsx),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, model_hsx),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, model_knl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, model_skl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, model_skl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, model_hsx),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, model_skl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, model_skl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, model_skl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, model_skl),
|
||||
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, model_skl),
|
||||
{},
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);
|
||||
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
|
||||
|
||||
static int __init rapl_pmu_init(void)
|
||||
{
|
||||
const struct x86_cpu_id *id;
|
||||
struct intel_rapl_init_fun *rapl_init;
|
||||
bool apply_quirk;
|
||||
struct rapl_model *rm;
|
||||
int ret;
|
||||
|
||||
id = x86_match_cpu(rapl_cpu_match);
|
||||
id = x86_match_cpu(rapl_model_match);
|
||||
if (!id)
|
||||
return -ENODEV;
|
||||
|
||||
rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
|
||||
apply_quirk = rapl_init->apply_quirk;
|
||||
rapl_cntr_mask = rapl_init->cntr_mask;
|
||||
rapl_pmu_events_group.attrs = rapl_init->attrs;
|
||||
rm = (struct rapl_model *) id->driver_data;
|
||||
rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX,
|
||||
false, (void *) &rm->events);
|
||||
|
||||
ret = rapl_check_hw_unit(apply_quirk);
|
||||
ret = rapl_check_hw_unit(rm->apply_quirk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
static struct intel_uncore_type *empty_uncore[] = { NULL, };
|
||||
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
|
||||
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
|
||||
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
|
||||
|
||||
static bool pcidrv_registered;
|
||||
struct pci_driver *uncore_pci_driver;
|
||||
@ -28,7 +29,7 @@ struct event_constraint uncore_constraint_empty =
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static int uncore_pcibus_to_physid(struct pci_bus *bus)
|
||||
int uncore_pcibus_to_physid(struct pci_bus *bus)
|
||||
{
|
||||
struct pci2phy_map *map;
|
||||
int phys_id = -1;
|
||||
@ -119,6 +120,21 @@ u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *eve
|
||||
return count;
|
||||
}
|
||||
|
||||
void uncore_mmio_exit_box(struct intel_uncore_box *box)
|
||||
{
|
||||
if (box->io_addr)
|
||||
iounmap(box->io_addr);
|
||||
}
|
||||
|
||||
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
if (!box->io_addr)
|
||||
return 0;
|
||||
|
||||
return readq(box->io_addr + event->hw.event_base);
|
||||
}
|
||||
|
||||
/*
|
||||
* generic get constraint function for shared match/mask registers.
|
||||
*/
|
||||
@ -1143,12 +1159,27 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
|
||||
uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
|
||||
}
|
||||
|
||||
static int uncore_event_cpu_offline(unsigned int cpu)
|
||||
static void uncore_box_unref(struct intel_uncore_type **types, int id)
|
||||
{
|
||||
struct intel_uncore_type *type, **types = uncore_msr_uncores;
|
||||
struct intel_uncore_type *type;
|
||||
struct intel_uncore_pmu *pmu;
|
||||
struct intel_uncore_box *box;
|
||||
int i, die, target;
|
||||
int i;
|
||||
|
||||
for (; *types; types++) {
|
||||
type = *types;
|
||||
pmu = type->pmus;
|
||||
for (i = 0; i < type->num_boxes; i++, pmu++) {
|
||||
box = pmu->boxes[id];
|
||||
if (box && atomic_dec_return(&box->refcnt) == 0)
|
||||
uncore_box_exit(box);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int uncore_event_cpu_offline(unsigned int cpu)
|
||||
{
|
||||
int die, target;
|
||||
|
||||
/* Check if exiting cpu is used for collecting uncore events */
|
||||
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
|
||||
@ -1163,20 +1194,14 @@ static int uncore_event_cpu_offline(unsigned int cpu)
|
||||
target = -1;
|
||||
|
||||
uncore_change_context(uncore_msr_uncores, cpu, target);
|
||||
uncore_change_context(uncore_mmio_uncores, cpu, target);
|
||||
uncore_change_context(uncore_pci_uncores, cpu, target);
|
||||
|
||||
unref:
|
||||
/* Clear the references */
|
||||
die = topology_logical_die_id(cpu);
|
||||
for (; *types; types++) {
|
||||
type = *types;
|
||||
pmu = type->pmus;
|
||||
for (i = 0; i < type->num_boxes; i++, pmu++) {
|
||||
box = pmu->boxes[die];
|
||||
if (box && atomic_dec_return(&box->refcnt) == 0)
|
||||
uncore_box_exit(box);
|
||||
}
|
||||
}
|
||||
uncore_box_unref(uncore_msr_uncores, die);
|
||||
uncore_box_unref(uncore_mmio_uncores, die);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1219,15 +1244,15 @@ cleanup:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int uncore_event_cpu_online(unsigned int cpu)
|
||||
static int uncore_box_ref(struct intel_uncore_type **types,
|
||||
int id, unsigned int cpu)
|
||||
{
|
||||
struct intel_uncore_type *type, **types = uncore_msr_uncores;
|
||||
struct intel_uncore_type *type;
|
||||
struct intel_uncore_pmu *pmu;
|
||||
struct intel_uncore_box *box;
|
||||
int i, ret, die, target;
|
||||
int i, ret;
|
||||
|
||||
die = topology_logical_die_id(cpu);
|
||||
ret = allocate_boxes(types, die, cpu);
|
||||
ret = allocate_boxes(types, id, cpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1235,11 +1260,23 @@ static int uncore_event_cpu_online(unsigned int cpu)
|
||||
type = *types;
|
||||
pmu = type->pmus;
|
||||
for (i = 0; i < type->num_boxes; i++, pmu++) {
|
||||
box = pmu->boxes[die];
|
||||
box = pmu->boxes[id];
|
||||
if (box && atomic_inc_return(&box->refcnt) == 1)
|
||||
uncore_box_init(box);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int uncore_event_cpu_online(unsigned int cpu)
|
||||
{
|
||||
int die, target, msr_ret, mmio_ret;
|
||||
|
||||
die = topology_logical_die_id(cpu);
|
||||
msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
|
||||
mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
|
||||
if (msr_ret && mmio_ret)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Check if there is an online cpu in the package
|
||||
@ -1251,7 +1288,10 @@ static int uncore_event_cpu_online(unsigned int cpu)
|
||||
|
||||
cpumask_set_cpu(cpu, &uncore_cpu_mask);
|
||||
|
||||
uncore_change_context(uncore_msr_uncores, -1, cpu);
|
||||
if (!msr_ret)
|
||||
uncore_change_context(uncore_msr_uncores, -1, cpu);
|
||||
if (!mmio_ret)
|
||||
uncore_change_context(uncore_mmio_uncores, -1, cpu);
|
||||
uncore_change_context(uncore_pci_uncores, -1, cpu);
|
||||
return 0;
|
||||
}
|
||||
@ -1299,12 +1339,35 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init uncore_mmio_init(void)
|
||||
{
|
||||
struct intel_uncore_type **types = uncore_mmio_uncores;
|
||||
int ret;
|
||||
|
||||
ret = uncore_types_init(types, true);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
for (; *types; types++) {
|
||||
ret = type_pmu_register(*types);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
return 0;
|
||||
err:
|
||||
uncore_types_exit(uncore_mmio_uncores);
|
||||
uncore_mmio_uncores = empty_uncore;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
#define X86_UNCORE_MODEL_MATCH(model, init) \
|
||||
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
|
||||
|
||||
struct intel_uncore_init_fun {
|
||||
void (*cpu_init)(void);
|
||||
int (*pci_init)(void);
|
||||
void (*mmio_init)(void);
|
||||
};
|
||||
|
||||
static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
|
||||
@ -1375,6 +1438,12 @@ static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
|
||||
.pci_init = skl_uncore_pci_init,
|
||||
};
|
||||
|
||||
static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
|
||||
.cpu_init = snr_uncore_cpu_init,
|
||||
.pci_init = snr_uncore_pci_init,
|
||||
.mmio_init = snr_uncore_mmio_init,
|
||||
};
|
||||
|
||||
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
|
||||
@ -1403,6 +1472,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init),
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, icl_uncore_init),
|
||||
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init),
|
||||
{},
|
||||
};
|
||||
|
||||
@ -1412,7 +1483,7 @@ static int __init intel_uncore_init(void)
|
||||
{
|
||||
const struct x86_cpu_id *id;
|
||||
struct intel_uncore_init_fun *uncore_init;
|
||||
int pret = 0, cret = 0, ret;
|
||||
int pret = 0, cret = 0, mret = 0, ret;
|
||||
|
||||
id = x86_match_cpu(intel_uncore_match);
|
||||
if (!id)
|
||||
@ -1435,7 +1506,12 @@ static int __init intel_uncore_init(void)
|
||||
cret = uncore_cpu_init();
|
||||
}
|
||||
|
||||
if (cret && pret)
|
||||
if (uncore_init->mmio_init) {
|
||||
uncore_init->mmio_init();
|
||||
mret = uncore_mmio_init();
|
||||
}
|
||||
|
||||
if (cret && pret && mret)
|
||||
return -ENODEV;
|
||||
|
||||
/* Install hotplug callbacks to setup the targets for each package */
|
||||
@ -1449,6 +1525,7 @@ static int __init intel_uncore_init(void)
|
||||
|
||||
err:
|
||||
uncore_types_exit(uncore_msr_uncores);
|
||||
uncore_types_exit(uncore_mmio_uncores);
|
||||
uncore_pci_exit();
|
||||
return ret;
|
||||
}
|
||||
@ -1458,6 +1535,7 @@ static void __exit intel_uncore_exit(void)
|
||||
{
|
||||
cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
|
||||
uncore_types_exit(uncore_msr_uncores);
|
||||
uncore_types_exit(uncore_mmio_uncores);
|
||||
uncore_pci_exit();
|
||||
}
|
||||
module_exit(intel_uncore_exit);
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pci.h>
|
||||
#include <asm/apicdef.h>
|
||||
#include <linux/io-64-nonatomic-lo-hi.h>
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include "../perf_event.h"
|
||||
@ -56,7 +57,10 @@ struct intel_uncore_type {
|
||||
unsigned fixed_ctr;
|
||||
unsigned fixed_ctl;
|
||||
unsigned box_ctl;
|
||||
unsigned msr_offset;
|
||||
union {
|
||||
unsigned msr_offset;
|
||||
unsigned mmio_offset;
|
||||
};
|
||||
unsigned num_shared_regs:8;
|
||||
unsigned single_fixed:1;
|
||||
unsigned pair_ctr_ctl:1;
|
||||
@ -125,7 +129,7 @@ struct intel_uncore_box {
|
||||
struct hrtimer hrtimer;
|
||||
struct list_head list;
|
||||
struct list_head active_list;
|
||||
void *io_addr;
|
||||
void __iomem *io_addr;
|
||||
struct intel_uncore_extra_reg shared_regs[0];
|
||||
};
|
||||
|
||||
@ -159,6 +163,7 @@ struct pci2phy_map {
|
||||
};
|
||||
|
||||
struct pci2phy_map *__find_pci2phy_map(int segment);
|
||||
int uncore_pcibus_to_physid(struct pci_bus *bus);
|
||||
|
||||
ssize_t uncore_event_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf);
|
||||
@ -190,6 +195,13 @@ static inline bool uncore_pmc_freerunning(int idx)
|
||||
return idx == UNCORE_PMC_IDX_FREERUNNING;
|
||||
}
|
||||
|
||||
static inline
|
||||
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
|
||||
{
|
||||
return box->pmu->type->box_ctl +
|
||||
box->pmu->type->mmio_offset * box->pmu->pmu_idx;
|
||||
}
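A quick worked example of the offset this new helper returns, plugging in the SNR IMC values added later in this series (box_ctl 0x22800, mmio_offset 0x4000); the stand-alone helper name is illustrative only:

/* Illustrative sketch, not kernel code; constants from the SNR IMC patch below. */
static unsigned int example_snr_imc_box_ctl(unsigned int pmu_idx)
{
	/* box 0 -> 0x22800, box 1 -> 0x22800 + 0x4000 = 0x26800 */
	return 0x22800 + 0x4000 * pmu_idx;
}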
|
||||
|
||||
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
|
||||
{
|
||||
return box->pmu->type->box_ctl;
|
||||
@ -330,7 +342,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
|
||||
static inline
|
||||
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
|
||||
{
|
||||
if (box->pci_dev)
|
||||
if (box->pci_dev || box->io_addr)
|
||||
return uncore_pci_fixed_ctl(box);
|
||||
else
|
||||
return uncore_msr_fixed_ctl(box);
|
||||
@ -339,7 +351,7 @@ unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
|
||||
static inline
|
||||
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
|
||||
{
|
||||
if (box->pci_dev)
|
||||
if (box->pci_dev || box->io_addr)
|
||||
return uncore_pci_fixed_ctr(box);
|
||||
else
|
||||
return uncore_msr_fixed_ctr(box);
|
||||
@ -348,7 +360,7 @@ unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
|
||||
static inline
|
||||
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
|
||||
{
|
||||
if (box->pci_dev)
|
||||
if (box->pci_dev || box->io_addr)
|
||||
return uncore_pci_event_ctl(box, idx);
|
||||
else
|
||||
return uncore_msr_event_ctl(box, idx);
|
||||
@ -357,7 +369,7 @@ unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
|
||||
static inline
|
||||
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
|
||||
{
|
||||
if (box->pci_dev)
|
||||
if (box->pci_dev || box->io_addr)
|
||||
return uncore_pci_perf_ctr(box, idx);
|
||||
else
|
||||
return uncore_msr_perf_ctr(box, idx);
|
||||
@ -419,6 +431,16 @@ static inline bool is_freerunning_event(struct perf_event *event)
|
||||
(((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
|
||||
}
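For reference, a stand-alone sketch of the free-running config check, assuming UNCORE_FIXED_EVENT is 0xff and UNCORE_FREERUNNING_UMASK_START is 0x10, consistent with the event=0xff,umask>=0x10 encodings listed later in this series:

/* Sketch under the stated assumptions; not the kernel's definition. */
static bool example_is_freerunning(unsigned long long cfg)
{
	return ((cfg & 0xff) == 0xff) &&	/* fixed/free-running event code */
	       (((cfg >> 8) & 0xff) >= 0x10);	/* umask selects the counter   */
}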
|
||||
|
||||
/* Check and reject invalid config */
|
||||
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
if (is_freerunning_event(event))
|
||||
return 0;
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline void uncore_disable_box(struct intel_uncore_box *box)
|
||||
{
|
||||
if (box->pmu->type->ops->disable_box)
|
||||
@ -482,6 +504,9 @@ static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *ev
|
||||
|
||||
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
|
||||
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
|
||||
void uncore_mmio_exit_box(struct intel_uncore_box *box);
|
||||
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
|
||||
struct perf_event *event);
|
||||
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
|
||||
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
|
||||
void uncore_pmu_event_start(struct perf_event *event, int flags);
|
||||
@ -497,6 +522,7 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
|
||||
|
||||
extern struct intel_uncore_type **uncore_msr_uncores;
|
||||
extern struct intel_uncore_type **uncore_pci_uncores;
|
||||
extern struct intel_uncore_type **uncore_mmio_uncores;
|
||||
extern struct pci_driver *uncore_pci_driver;
|
||||
extern raw_spinlock_t pci2phy_map_lock;
|
||||
extern struct list_head pci2phy_map_head;
|
||||
@ -528,6 +554,9 @@ int knl_uncore_pci_init(void);
|
||||
void knl_uncore_cpu_init(void);
|
||||
int skx_uncore_pci_init(void);
|
||||
void skx_uncore_cpu_init(void);
|
||||
int snr_uncore_pci_init(void);
|
||||
void snr_uncore_cpu_init(void);
|
||||
void snr_uncore_mmio_init(void);
|
||||
|
||||
/* uncore_nhmex.c */
|
||||
void nhmex_uncore_cpu_init(void);
|
||||
|
@ -3,27 +3,29 @@
|
||||
#include "uncore.h"
|
||||
|
||||
/* Uncore IMC PCI IDs */
|
||||
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
|
||||
#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
|
||||
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
|
||||
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
|
||||
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
|
||||
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
|
||||
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
|
||||
#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
|
||||
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
|
||||
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
|
||||
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
|
||||
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
|
||||
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC 0x5910
|
||||
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC 0x5918
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
|
||||
@ -34,9 +36,15 @@
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
|
||||
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
|
||||
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC 0x590c
|
||||
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC 0x590d
|
||||
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC 0x3ed0
|
||||
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC 0x3e34
|
||||
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC 0x3e35
|
||||
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02
|
||||
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12
|
||||
|
||||
|
||||
/* SNB event control */
|
||||
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
|
||||
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
|
||||
@ -420,11 +428,6 @@ static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
|
||||
box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
|
||||
}
|
||||
|
||||
static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
|
||||
{
|
||||
iounmap(box->io_addr);
|
||||
}
|
||||
|
||||
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
|
||||
{}
|
||||
|
||||
@ -437,13 +440,6 @@ static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct per
|
||||
static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
|
||||
{}
|
||||
|
||||
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
|
||||
}
|
||||
|
||||
/*
|
||||
* Keep the custom event_init() function compatible with old event
|
||||
* encoding for free running counters.
|
||||
@ -570,13 +566,13 @@ static struct pmu snb_uncore_imc_pmu = {
|
||||
|
||||
static struct intel_uncore_ops snb_uncore_imc_ops = {
|
||||
.init_box = snb_uncore_imc_init_box,
|
||||
.exit_box = snb_uncore_imc_exit_box,
|
||||
.exit_box = uncore_mmio_exit_box,
|
||||
.enable_box = snb_uncore_imc_enable_box,
|
||||
.disable_box = snb_uncore_imc_disable_box,
|
||||
.disable_event = snb_uncore_imc_disable_event,
|
||||
.enable_event = snb_uncore_imc_enable_event,
|
||||
.hw_config = snb_uncore_imc_hw_config,
|
||||
.read_counter = snb_uncore_imc_read_counter,
|
||||
.read_counter = uncore_mmio_read_counter,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snb_uncore_imc = {
|
||||
@ -681,6 +677,14 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
@ -737,6 +741,26 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* IMC */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||
},
|
||||
{ /* end: all zeroes */ },
|
||||
};
|
||||
|
||||
@ -807,6 +831,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
|
||||
IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
|
||||
IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
|
||||
IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
|
||||
IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core H Quad Core */
|
||||
IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S 4 cores Work Station */
|
||||
IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
|
||||
IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
|
||||
IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
|
||||
@ -821,6 +847,11 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
|
||||
IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
|
||||
IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
|
||||
IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
|
||||
IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver), /* 8th Gen Core Y Mobile Dual Core */
|
||||
IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core Y Mobile Quad Core */
|
||||
IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
|
||||
IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
|
||||
IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Dual Core */
|
||||
IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
|
||||
IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
|
||||
{ /* end marker */ }
|
||||
|
@ -324,12 +324,77 @@
|
||||
#define SKX_M2M_PCI_PMON_CTR0 0x200
|
||||
#define SKX_M2M_PCI_PMON_BOX_CTL 0x258
|
||||
|
||||
/* SNR Ubox */
|
||||
#define SNR_U_MSR_PMON_CTR0 0x1f98
|
||||
#define SNR_U_MSR_PMON_CTL0 0x1f91
|
||||
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
|
||||
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
|
||||
|
||||
/* SNR CHA */
|
||||
#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
|
||||
#define SNR_CHA_MSR_PMON_CTL0 0x1c01
|
||||
#define SNR_CHA_MSR_PMON_CTR0 0x1c08
|
||||
#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
|
||||
#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
|
||||
|
||||
|
||||
/* SNR IIO */
|
||||
#define SNR_IIO_MSR_PMON_CTL0 0x1e08
|
||||
#define SNR_IIO_MSR_PMON_CTR0 0x1e01
|
||||
#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
|
||||
#define SNR_IIO_MSR_OFFSET 0x10
|
||||
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
|
||||
|
||||
/* SNR IRP */
|
||||
#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
|
||||
#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
|
||||
#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
|
||||
#define SNR_IRP_MSR_OFFSET 0x10
|
||||
|
||||
/* SNR M2PCIE */
|
||||
#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
|
||||
#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
|
||||
#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
|
||||
#define SNR_M2PCIE_MSR_OFFSET 0x10
|
||||
|
||||
/* SNR PCU */
|
||||
#define SNR_PCU_MSR_PMON_CTL0 0x1ef1
|
||||
#define SNR_PCU_MSR_PMON_CTR0 0x1ef8
|
||||
#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
|
||||
#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
|
||||
|
||||
/* SNR M2M */
|
||||
#define SNR_M2M_PCI_PMON_CTL0 0x468
|
||||
#define SNR_M2M_PCI_PMON_CTR0 0x440
|
||||
#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
|
||||
#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
|
||||
|
||||
/* SNR PCIE3 */
|
||||
#define SNR_PCIE3_PCI_PMON_CTL0 0x508
|
||||
#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
|
||||
#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4
|
||||
|
||||
/* SNR IMC */
|
||||
#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
|
||||
#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
|
||||
#define SNR_IMC_MMIO_PMON_CTL0 0x40
|
||||
#define SNR_IMC_MMIO_PMON_CTR0 0x8
|
||||
#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
|
||||
#define SNR_IMC_MMIO_OFFSET 0x4000
|
||||
#define SNR_IMC_MMIO_SIZE 0x4000
|
||||
#define SNR_IMC_MMIO_BASE_OFFSET 0xd0
|
||||
#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
|
||||
#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
|
||||
#define SNR_IMC_MMIO_MEM0_MASK 0x7FF
|
||||
|
||||
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
|
||||
@ -343,11 +408,14 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
|
||||
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
|
||||
@ -3585,6 +3653,7 @@ static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
|
||||
|
||||
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
|
||||
.read_counter = uncore_msr_read_counter,
|
||||
.hw_config = uncore_freerunning_hw_config,
|
||||
};
|
||||
|
||||
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
|
||||
@ -3967,3 +4036,535 @@ int skx_uncore_pci_init(void)
|
||||
}
|
||||
|
||||
/* end of SKX uncore support */
|
||||
|
||||
/* SNR uncore support */
|
||||
|
||||
static struct intel_uncore_type snr_uncore_ubox = {
|
||||
.name = "ubox",
|
||||
.num_counters = 2,
|
||||
.num_boxes = 1,
|
||||
.perf_ctr_bits = 48,
|
||||
.fixed_ctr_bits = 48,
|
||||
.perf_ctr = SNR_U_MSR_PMON_CTR0,
|
||||
.event_ctl = SNR_U_MSR_PMON_CTL0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
|
||||
.fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
|
||||
.ops = &ivbep_uncore_msr_ops,
|
||||
.format_group = &ivbep_uncore_format_group,
|
||||
};
|
||||
|
||||
static struct attribute *snr_uncore_cha_formats_attr[] = {
|
||||
&format_attr_event.attr,
|
||||
&format_attr_umask_ext2.attr,
|
||||
&format_attr_edge.attr,
|
||||
&format_attr_tid_en.attr,
|
||||
&format_attr_inv.attr,
|
||||
&format_attr_thresh8.attr,
|
||||
&format_attr_filter_tid5.attr,
|
||||
NULL,
|
||||
};
|
||||
static const struct attribute_group snr_uncore_chabox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snr_uncore_cha_formats_attr,
|
||||
};
|
||||
|
||||
static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
|
||||
|
||||
reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
|
||||
box->pmu->type->msr_offset * box->pmu->pmu_idx;
|
||||
reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
|
||||
reg1->idx = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void snr_cha_enable_event(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
|
||||
|
||||
if (reg1->idx != EXTRA_REG_NONE)
|
||||
wrmsrl(reg1->reg, reg1->config);
|
||||
|
||||
wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
|
||||
}
|
||||
|
||||
static struct intel_uncore_ops snr_uncore_chabox_ops = {
|
||||
.init_box = ivbep_uncore_msr_init_box,
|
||||
.disable_box = snbep_uncore_msr_disable_box,
|
||||
.enable_box = snbep_uncore_msr_enable_box,
|
||||
.disable_event = snbep_uncore_msr_disable_event,
|
||||
.enable_event = snr_cha_enable_event,
|
||||
.read_counter = uncore_msr_read_counter,
|
||||
.hw_config = snr_cha_hw_config,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_chabox = {
|
||||
.name = "cha",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 6,
|
||||
.perf_ctr_bits = 48,
|
||||
.event_ctl = SNR_CHA_MSR_PMON_CTL0,
|
||||
.perf_ctr = SNR_CHA_MSR_PMON_CTR0,
|
||||
.box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
|
||||
.msr_offset = HSWEP_CBO_MSR_OFFSET,
|
||||
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
|
||||
.event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
|
||||
.ops = &snr_uncore_chabox_ops,
|
||||
.format_group = &snr_uncore_chabox_format_group,
|
||||
};
|
||||
|
||||
static struct attribute *snr_uncore_iio_formats_attr[] = {
|
||||
&format_attr_event.attr,
|
||||
&format_attr_umask.attr,
|
||||
&format_attr_edge.attr,
|
||||
&format_attr_inv.attr,
|
||||
&format_attr_thresh9.attr,
|
||||
&format_attr_ch_mask2.attr,
|
||||
&format_attr_fc_mask2.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group snr_uncore_iio_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snr_uncore_iio_formats_attr,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_iio = {
|
||||
.name = "iio",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 5,
|
||||
.perf_ctr_bits = 48,
|
||||
.event_ctl = SNR_IIO_MSR_PMON_CTL0,
|
||||
.perf_ctr = SNR_IIO_MSR_PMON_CTR0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
|
||||
.box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
|
||||
.msr_offset = SNR_IIO_MSR_OFFSET,
|
||||
.ops = &ivbep_uncore_msr_ops,
|
||||
.format_group = &snr_uncore_iio_format_group,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_irp = {
|
||||
.name = "irp",
|
||||
.num_counters = 2,
|
||||
.num_boxes = 5,
|
||||
.perf_ctr_bits = 48,
|
||||
.event_ctl = SNR_IRP0_MSR_PMON_CTL0,
|
||||
.perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
|
||||
.msr_offset = SNR_IRP_MSR_OFFSET,
|
||||
.ops = &ivbep_uncore_msr_ops,
|
||||
.format_group = &ivbep_uncore_format_group,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_m2pcie = {
|
||||
.name = "m2pcie",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 5,
|
||||
.perf_ctr_bits = 48,
|
||||
.event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
|
||||
.perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
|
||||
.box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
|
||||
.msr_offset = SNR_M2PCIE_MSR_OFFSET,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.ops = &ivbep_uncore_msr_ops,
|
||||
.format_group = &ivbep_uncore_format_group,
|
||||
};
|
||||
|
||||
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
|
||||
int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
|
||||
|
||||
if (ev_sel >= 0xb && ev_sel <= 0xe) {
|
||||
reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
|
||||
reg1->idx = ev_sel - 0xb;
|
||||
reg1->config = event->attr.config1 & (0xff << reg1->idx);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct intel_uncore_ops snr_uncore_pcu_ops = {
|
||||
IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
|
||||
.hw_config = snr_pcu_hw_config,
|
||||
.get_constraint = snbep_pcu_get_constraint,
|
||||
.put_constraint = snbep_pcu_put_constraint,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_pcu = {
|
||||
.name = "pcu",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 1,
|
||||
.perf_ctr_bits = 48,
|
||||
.perf_ctr = SNR_PCU_MSR_PMON_CTR0,
|
||||
.event_ctl = SNR_PCU_MSR_PMON_CTL0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
|
||||
.num_shared_regs = 1,
|
||||
.ops = &snr_uncore_pcu_ops,
|
||||
.format_group = &skx_uncore_pcu_format_group,
|
||||
};
|
||||
|
||||
enum perf_uncore_snr_iio_freerunning_type_id {
|
||||
SNR_IIO_MSR_IOCLK,
|
||||
SNR_IIO_MSR_BW_IN,
|
||||
|
||||
SNR_IIO_FREERUNNING_TYPE_MAX,
|
||||
};
|
||||
|
||||
static struct freerunning_counters snr_iio_freerunning[] = {
|
||||
[SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
|
||||
[SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
|
||||
};
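Assuming the tuple layout used by the SKX free-running tables ({counter_base, counter_offset, box_offset, num_counters, bits}), the MSR of one of the BW_IN counters above would be computed roughly as below; this is a sketch, not kernel code:

/* Sketch under the stated layout assumption. */
static unsigned int example_snr_iio_bw_in_msr(unsigned int port, unsigned int box)
{
	/* BW_IN: base 0x1f00, counter stride 0x1, per-box stride 0x10 */
	return 0x1f00 + 0x1 * port + 0x10 * box;	/* e.g. port 3, box 2 -> 0x1f23 */
}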
|
||||
|
||||
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
|
||||
/* Free-Running IIO CLOCKS Counter */
|
||||
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
|
||||
/* Free-Running IIO BANDWIDTH IN Counters */
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
|
||||
{ /* end: all zeroes */ },
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_iio_free_running = {
|
||||
.name = "iio_free_running",
|
||||
.num_counters = 9,
|
||||
.num_boxes = 5,
|
||||
.num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
|
||||
.freerunning = snr_iio_freerunning,
|
||||
.ops = &skx_uncore_iio_freerunning_ops,
|
||||
.event_descs = snr_uncore_iio_freerunning_events,
|
||||
.format_group = &skx_uncore_iio_freerunning_format_group,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type *snr_msr_uncores[] = {
|
||||
&snr_uncore_ubox,
|
||||
&snr_uncore_chabox,
|
||||
&snr_uncore_iio,
|
||||
&snr_uncore_irp,
|
||||
&snr_uncore_m2pcie,
|
||||
&snr_uncore_pcu,
|
||||
&snr_uncore_iio_free_running,
|
||||
NULL,
|
||||
};
|
||||
|
||||
void snr_uncore_cpu_init(void)
|
||||
{
|
||||
uncore_msr_uncores = snr_msr_uncores;
|
||||
}
|
||||
|
||||
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
|
||||
{
|
||||
struct pci_dev *pdev = box->pci_dev;
|
||||
int box_ctl = uncore_pci_box_ctl(box);
|
||||
|
||||
__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
|
||||
pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
|
||||
}
|
||||
|
||||
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
|
||||
.init_box = snr_m2m_uncore_pci_init_box,
|
||||
.disable_box = snbep_uncore_pci_disable_box,
|
||||
.enable_box = snbep_uncore_pci_enable_box,
|
||||
.disable_event = snbep_uncore_pci_disable_event,
|
||||
.enable_event = snbep_uncore_pci_enable_event,
|
||||
.read_counter = snbep_uncore_pci_read_counter,
|
||||
};
|
||||
|
||||
static struct attribute *snr_m2m_uncore_formats_attr[] = {
|
||||
&format_attr_event.attr,
|
||||
&format_attr_umask_ext3.attr,
|
||||
&format_attr_edge.attr,
|
||||
&format_attr_inv.attr,
|
||||
&format_attr_thresh8.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group snr_m2m_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snr_m2m_uncore_formats_attr,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_m2m = {
|
||||
.name = "m2m",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 1,
|
||||
.perf_ctr_bits = 48,
|
||||
.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
|
||||
.event_ctl = SNR_M2M_PCI_PMON_CTL0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
|
||||
.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
|
||||
.ops = &snr_m2m_uncore_pci_ops,
|
||||
.format_group = &snr_m2m_uncore_format_group,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_pcie3 = {
|
||||
.name = "pcie3",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 1,
|
||||
.perf_ctr_bits = 48,
|
||||
.perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
|
||||
.event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
|
||||
.ops = &ivbep_uncore_pci_ops,
|
||||
.format_group = &ivbep_uncore_format_group,
|
||||
};
|
||||
|
||||
enum {
|
||||
SNR_PCI_UNCORE_M2M,
|
||||
SNR_PCI_UNCORE_PCIE3,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type *snr_pci_uncores[] = {
|
||||
[SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
|
||||
[SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct pci_device_id snr_uncore_pci_ids[] = {
|
||||
{ /* M2M */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
|
||||
.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
|
||||
},
|
||||
{ /* PCIe3 */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
|
||||
.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
|
||||
},
|
||||
{ /* end: all zeroes */ }
|
||||
};
|
||||
|
||||
static struct pci_driver snr_uncore_pci_driver = {
|
||||
.name = "snr_uncore",
|
||||
.id_table = snr_uncore_pci_ids,
|
||||
};
|
||||
|
||||
int snr_uncore_pci_init(void)
|
||||
{
|
||||
/* SNR UBOX DID */
|
||||
int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
|
||||
SKX_GIDNIDMAP, true);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
uncore_pci_uncores = snr_pci_uncores;
|
||||
uncore_pci_driver = &snr_uncore_pci_driver;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_dev *snr_uncore_get_mc_dev(int id)
|
||||
{
|
||||
struct pci_dev *mc_dev = NULL;
|
||||
int phys_id, pkg;
|
||||
|
||||
while (1) {
|
||||
mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
|
||||
if (!mc_dev)
|
||||
break;
|
||||
phys_id = uncore_pcibus_to_physid(mc_dev->bus);
|
||||
if (phys_id < 0)
|
||||
continue;
|
||||
pkg = topology_phys_to_logical_pkg(phys_id);
|
||||
if (pkg < 0)
|
||||
continue;
|
||||
else if (pkg == id)
|
||||
break;
|
||||
}
|
||||
return mc_dev;
|
||||
}
|
||||
|
||||
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
|
||||
{
|
||||
struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
|
||||
unsigned int box_ctl = uncore_mmio_box_ctl(box);
|
||||
resource_size_t addr;
|
||||
u32 pci_dword;
|
||||
|
||||
if (!pdev)
|
||||
return;
|
||||
|
||||
pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
|
||||
addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
|
||||
|
||||
pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
|
||||
addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
|
||||
|
||||
addr += box_ctl;
|
||||
|
||||
box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
|
||||
if (!box->io_addr)
|
||||
return;
|
||||
|
||||
writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
|
||||
}
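The address math above can be read in isolation as follows; this is a sketch using the SNR_IMC_MMIO_* mask values from this patch, with explicit casts added for clarity, and the helper name is hypothetical:

/* Sketch only: assembles the PMON base from the two PCI config dwords. */
static unsigned long long example_snr_imc_pmon_addr(unsigned int base_dword,
						    unsigned int mem0_dword,
						    unsigned int box_ctl)
{
	unsigned long long addr;

	addr  = (unsigned long long)(base_dword & 0x1FFFFFFF) << 23;	/* SNR_IMC_MMIO_BASE_MASK */
	addr |= (unsigned long long)(mem0_dword & 0x7FF) << 12;		/* SNR_IMC_MMIO_MEM0_MASK */
	return addr + box_ctl;	/* box_ctl already carries the per-box offset */
}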
|
||||
|
||||
static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
|
||||
{
|
||||
u32 config;
|
||||
|
||||
if (!box->io_addr)
|
||||
return;
|
||||
|
||||
config = readl(box->io_addr);
|
||||
config |= SNBEP_PMON_BOX_CTL_FRZ;
|
||||
writel(config, box->io_addr);
|
||||
}
|
||||
|
||||
static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
|
||||
{
|
||||
u32 config;
|
||||
|
||||
if (!box->io_addr)
|
||||
return;
|
||||
|
||||
config = readl(box->io_addr);
|
||||
config &= ~SNBEP_PMON_BOX_CTL_FRZ;
|
||||
writel(config, box->io_addr);
|
||||
}
|
||||
|
||||
static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
if (!box->io_addr)
|
||||
return;
|
||||
|
||||
writel(hwc->config | SNBEP_PMON_CTL_EN,
|
||||
box->io_addr + hwc->config_base);
|
||||
}
|
||||
|
||||
static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
if (!box->io_addr)
|
||||
return;
|
||||
|
||||
writel(hwc->config, box->io_addr + hwc->config_base);
|
||||
}
|
||||
|
||||
static struct intel_uncore_ops snr_uncore_mmio_ops = {
|
||||
.init_box = snr_uncore_mmio_init_box,
|
||||
.exit_box = uncore_mmio_exit_box,
|
||||
.disable_box = snr_uncore_mmio_disable_box,
|
||||
.enable_box = snr_uncore_mmio_enable_box,
|
||||
.disable_event = snr_uncore_mmio_disable_event,
|
||||
.enable_event = snr_uncore_mmio_enable_event,
|
||||
.read_counter = uncore_mmio_read_counter,
|
||||
};
|
||||
|
||||
static struct uncore_event_desc snr_uncore_imc_events[] = {
|
||||
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
|
||||
INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
|
||||
INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
|
||||
INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
|
||||
INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
|
||||
{ /* end: all zeroes */ },
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_imc = {
|
||||
.name = "imc",
|
||||
.num_counters = 4,
|
||||
.num_boxes = 2,
|
||||
.perf_ctr_bits = 48,
|
||||
.fixed_ctr_bits = 48,
|
||||
.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
|
||||
.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
|
||||
.event_descs = snr_uncore_imc_events,
|
||||
.perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
|
||||
.event_ctl = SNR_IMC_MMIO_PMON_CTL0,
|
||||
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
|
||||
.box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
|
||||
.mmio_offset = SNR_IMC_MMIO_OFFSET,
|
||||
.ops = &snr_uncore_mmio_ops,
|
||||
.format_group = &skx_uncore_format_group,
|
||||
};
|
||||
|
||||
enum perf_uncore_snr_imc_freerunning_type_id {
|
||||
SNR_IMC_DCLK,
|
||||
SNR_IMC_DDR,
|
||||
|
||||
SNR_IMC_FREERUNNING_TYPE_MAX,
|
||||
};
|
||||
|
||||
static struct freerunning_counters snr_imc_freerunning[] = {
|
||||
[SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
|
||||
[SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
|
||||
};
|
||||
|
||||
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
|
||||
INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
|
||||
|
||||
INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
|
||||
INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
|
||||
INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
|
||||
INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
|
||||
INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
|
||||
};
|
||||
|
||||
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
|
||||
.init_box = snr_uncore_mmio_init_box,
|
||||
.exit_box = uncore_mmio_exit_box,
|
||||
.read_counter = uncore_mmio_read_counter,
|
||||
.hw_config = uncore_freerunning_hw_config,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snr_uncore_imc_free_running = {
|
||||
.name = "imc_free_running",
|
||||
.num_counters = 3,
|
||||
.num_boxes = 1,
|
||||
.num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
|
||||
.freerunning = snr_imc_freerunning,
|
||||
.ops = &snr_uncore_imc_freerunning_ops,
|
||||
.event_descs = snr_uncore_imc_freerunning_events,
|
||||
.format_group = &skx_uncore_iio_freerunning_format_group,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type *snr_mmio_uncores[] = {
|
||||
&snr_uncore_imc,
|
||||
&snr_uncore_imc_free_running,
|
||||
NULL,
|
||||
};
|
||||
|
||||
void snr_uncore_mmio_init(void)
|
||||
{
|
||||
uncore_mmio_uncores = snr_mmio_uncores;
|
||||
}
|
||||
|
||||
/* end of SNR uncore support */
|
||||
|
@ -1,7 +1,9 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include "probe.h"
|
||||
|
||||
enum perf_msr_id {
|
||||
PERF_MSR_TSC = 0,
|
||||
@ -12,32 +14,30 @@ enum perf_msr_id {
|
||||
PERF_MSR_PTSC = 5,
|
||||
PERF_MSR_IRPERF = 6,
|
||||
PERF_MSR_THERM = 7,
|
||||
PERF_MSR_THERM_SNAP = 8,
|
||||
PERF_MSR_THERM_UNIT = 9,
|
||||
PERF_MSR_EVENT_MAX,
|
||||
};
|
||||
|
||||
static bool test_aperfmperf(int idx)
|
||||
static bool test_aperfmperf(int idx, void *data)
|
||||
{
|
||||
return boot_cpu_has(X86_FEATURE_APERFMPERF);
|
||||
}
|
||||
|
||||
static bool test_ptsc(int idx)
|
||||
static bool test_ptsc(int idx, void *data)
|
||||
{
|
||||
return boot_cpu_has(X86_FEATURE_PTSC);
|
||||
}
|
||||
|
||||
static bool test_irperf(int idx)
|
||||
static bool test_irperf(int idx, void *data)
|
||||
{
|
||||
return boot_cpu_has(X86_FEATURE_IRPERF);
|
||||
}
|
||||
|
||||
static bool test_therm_status(int idx)
|
||||
static bool test_therm_status(int idx, void *data)
|
||||
{
|
||||
return boot_cpu_has(X86_FEATURE_DTHERM);
|
||||
}
|
||||
|
||||
static bool test_intel(int idx)
|
||||
static bool test_intel(int idx, void *data)
|
||||
{
|
||||
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
|
||||
boot_cpu_data.x86 != 6)
|
||||
@ -98,37 +98,51 @@ static bool test_intel(int idx)
|
||||
return false;
|
||||
}
|
||||
|
||||
struct perf_msr {
|
||||
u64 msr;
|
||||
struct perf_pmu_events_attr *attr;
|
||||
bool (*test)(int idx);
|
||||
PMU_EVENT_ATTR_STRING(tsc, attr_tsc, "event=0x00" );
|
||||
PMU_EVENT_ATTR_STRING(aperf, attr_aperf, "event=0x01" );
|
||||
PMU_EVENT_ATTR_STRING(mperf, attr_mperf, "event=0x02" );
|
||||
PMU_EVENT_ATTR_STRING(pperf, attr_pperf, "event=0x03" );
|
||||
PMU_EVENT_ATTR_STRING(smi, attr_smi, "event=0x04" );
|
||||
PMU_EVENT_ATTR_STRING(ptsc, attr_ptsc, "event=0x05" );
|
||||
PMU_EVENT_ATTR_STRING(irperf, attr_irperf, "event=0x06" );
|
||||
PMU_EVENT_ATTR_STRING(cpu_thermal_margin, attr_therm, "event=0x07" );
|
||||
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, attr_therm_snap, "1" );
|
||||
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit, attr_therm_unit, "C" );
|
||||
|
||||
static unsigned long msr_mask;
|
||||
|
||||
PMU_EVENT_GROUP(events, aperf);
|
||||
PMU_EVENT_GROUP(events, mperf);
|
||||
PMU_EVENT_GROUP(events, pperf);
|
||||
PMU_EVENT_GROUP(events, smi);
|
||||
PMU_EVENT_GROUP(events, ptsc);
|
||||
PMU_EVENT_GROUP(events, irperf);
|
||||
|
||||
static struct attribute *attrs_therm[] = {
|
||||
&attr_therm.attr.attr,
|
||||
&attr_therm_snap.attr.attr,
|
||||
&attr_therm_unit.attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00" );
|
||||
PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01" );
|
||||
PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02" );
|
||||
PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03" );
|
||||
PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04" );
|
||||
PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05" );
|
||||
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06" );
|
||||
PMU_EVENT_ATTR_STRING(cpu_thermal_margin, evattr_therm, "event=0x07" );
|
||||
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, evattr_therm_snap, "1" );
|
||||
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit, evattr_therm_unit, "C" );
|
||||
static struct attribute_group group_therm = {
|
||||
.name = "events",
|
||||
.attrs = attrs_therm,
|
||||
};
|
||||
|
||||
static struct perf_msr msr[] = {
|
||||
[PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
|
||||
[PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
|
||||
[PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
|
||||
[PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
|
||||
[PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
|
||||
[PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
|
||||
[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
|
||||
[PERF_MSR_THERM] = { MSR_IA32_THERM_STATUS, &evattr_therm, test_therm_status, },
|
||||
[PERF_MSR_THERM_SNAP] = { MSR_IA32_THERM_STATUS, &evattr_therm_snap, test_therm_status, },
|
||||
[PERF_MSR_THERM_UNIT] = { MSR_IA32_THERM_STATUS, &evattr_therm_unit, test_therm_status, },
|
||||
[PERF_MSR_TSC] = { .no_check = true, },
|
||||
[PERF_MSR_APERF] = { MSR_IA32_APERF, &group_aperf, test_aperfmperf, },
|
||||
[PERF_MSR_MPERF] = { MSR_IA32_MPERF, &group_mperf, test_aperfmperf, },
|
||||
[PERF_MSR_PPERF] = { MSR_PPERF, &group_pperf, test_intel, },
|
||||
[PERF_MSR_SMI] = { MSR_SMI_COUNT, &group_smi, test_intel, },
|
||||
[PERF_MSR_PTSC] = { MSR_F15H_PTSC, &group_ptsc, test_ptsc, },
|
||||
[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &group_irperf, test_irperf, },
|
||||
[PERF_MSR_THERM] = { MSR_IA32_THERM_STATUS, &group_therm, test_therm_status, },
|
||||
};
|
||||
|
||||
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
|
||||
static struct attribute *events_attrs[] = {
|
||||
&attr_tsc.attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -153,6 +167,17 @@ static const struct attribute_group *attr_groups[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
const struct attribute_group *attr_update[] = {
|
||||
&group_aperf,
|
||||
&group_mperf,
|
||||
&group_pperf,
|
||||
&group_smi,
|
||||
&group_ptsc,
|
||||
&group_irperf,
|
||||
&group_therm,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int msr_event_init(struct perf_event *event)
|
||||
{
|
||||
u64 cfg = event->attr.config;
|
||||
@ -169,7 +194,7 @@ static int msr_event_init(struct perf_event *event)
|
||||
|
||||
cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
|
||||
|
||||
if (!msr[cfg].attr)
|
||||
if (!(msr_mask & (1 << cfg)))
|
||||
return -EINVAL;
|
||||
|
||||
event->hw.idx = -1;
|
||||
@ -252,32 +277,17 @@ static struct pmu pmu_msr = {
|
||||
.stop = msr_event_stop,
|
||||
.read = msr_event_update,
|
||||
.capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
|
||||
.attr_update = attr_update,
|
||||
};

static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;
	msr_mask = perf_msr_probe(msr, PERF_MSR_EVENT_MAX, true, NULL);

	perf_pmu_register(&pmu_msr, "msr", -1);
|
||||
|
||||
|
@ -613,14 +613,11 @@ struct x86_pmu {
|
||||
int attr_rdpmc_broken;
|
||||
int attr_rdpmc;
|
||||
struct attribute **format_attrs;
|
||||
struct attribute **event_attrs;
|
||||
struct attribute **caps_attrs;
|
||||
|
||||
ssize_t (*events_sysfs_show)(char *page, u64 config);
|
||||
struct attribute **cpu_events;
|
||||
const struct attribute_group **attr_update;
|
||||
|
||||
unsigned long attr_freeze_on_smi;
|
||||
struct attribute **attrs;
|
||||
|
||||
/*
|
||||
* CPU Hotplug hooks
|
||||
@ -886,8 +883,6 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
|
||||
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
|
||||
ssize_t intel_event_sysfs_show(char *page, u64 config);
|
||||
|
||||
struct attribute **merge_attr(struct attribute **a, struct attribute **b);
|
||||
|
||||
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
|
||||
char *page);
|
||||
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
|
||||
|
arch/x86/events/probe.c (new file, 45 lines)
@@ -0,0 +1,45 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/bits.h>
#include "probe.h"

static umode_t
not_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return 0;
}

unsigned long
perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
{
	unsigned long avail = 0;
	unsigned int bit;
	u64 val;

	if (cnt >= BITS_PER_LONG)
		return 0;

	for (bit = 0; bit < cnt; bit++) {
		if (!msr[bit].no_check) {
			struct attribute_group *grp = msr[bit].grp;

			grp->is_visible = not_visible;

			if (msr[bit].test && !msr[bit].test(bit, data))
				continue;
			/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
			if (rdmsrl_safe(msr[bit].msr, &val))
				continue;
			/* Disable zero counters if requested. */
			if (!zero && !val)
				continue;

			grp->is_visible = NULL;
		}
		avail |= BIT(bit);
	}

	return avail;
}
EXPORT_SYMBOL_GPL(perf_msr_probe);

arch/x86/events/probe.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_X86_EVENTS_PROBE_H__
#define __ARCH_X86_EVENTS_PROBE_H__
#include <linux/sysfs.h>

struct perf_msr {
	u64			  msr;
	struct attribute_group	 *grp;
	bool			(*test)(int idx, void *data);
	bool			  no_check;
};

unsigned long
perf_msr_probe(struct perf_msr *msr, int cnt, bool no_zero, void *data);

#define __PMU_EVENT_GROUP(_name)			\
static struct attribute *attrs_##_name[] = {		\
	&attr_##_name.attr.attr,			\
	NULL,						\
}

#define PMU_EVENT_GROUP(_grp, _name)			\
__PMU_EVENT_GROUP(_name);				\
static struct attribute_group group_##_name = {	\
	.name  = #_grp,					\
	.attrs = attrs_##_name,				\
}

#endif /* __ARCH_X86_EVENTS_PROBE_H__ */
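A minimal usage sketch of these helpers, modelled on how arch/x86/events/msr.c uses them elsewhere in this series; the "foo" event, its MSR choice, and the feature test are placeholders, not part of the patch:

/* Hypothetical driver fragment; assumes <linux/perf_event.h> and "probe.h". */
PMU_EVENT_ATTR_STRING(foo, attr_foo, "event=0x01");
PMU_EVENT_GROUP(events, foo);			/* defines group_foo under "events" */

static bool test_foo(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);	/* placeholder gate */
}

static struct perf_msr foo_msrs[] = {
	{ MSR_IA32_APERF, &group_foo, test_foo, },
};

static const struct attribute_group *foo_attr_update[] = {
	&group_foo,
	NULL,
};

static unsigned long foo_mask;

static int __init foo_init(void)
{
	/* Groups whose probe fails stay invisible via ->is_visible(). */
	foo_mask = perf_msr_probe(foo_msrs, ARRAY_SIZE(foo_msrs), false, NULL);
	/* then set foo_pmu.attr_update = foo_attr_update and register the PMU */
	return 0;
}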
@ -175,6 +175,26 @@ int sysfs_create_group(struct kobject *kobj,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sysfs_create_group);
|
||||
|
||||
static int internal_create_groups(struct kobject *kobj, int update,
				  const struct attribute_group **groups)
{
	int error = 0;
	int i;

	if (!groups)
		return 0;

	for (i = 0; groups[i]; i++) {
		error = internal_create_group(kobj, update, groups[i]);
		if (error) {
			while (--i >= 0)
				sysfs_remove_group(kobj, groups[i]);
			break;
		}
	}
	return error;
}

/**
|
||||
* sysfs_create_groups - given a directory kobject, create a bunch of attribute groups
|
||||
* @kobj: The kobject to create the group on
|
||||
@ -191,24 +211,28 @@ EXPORT_SYMBOL_GPL(sysfs_create_group);
|
||||
int sysfs_create_groups(struct kobject *kobj,
|
||||
const struct attribute_group **groups)
|
||||
{
|
||||
int error = 0;
|
||||
int i;
|
||||
|
||||
if (!groups)
|
||||
return 0;
|
||||
|
||||
for (i = 0; groups[i]; i++) {
|
||||
error = sysfs_create_group(kobj, groups[i]);
|
||||
if (error) {
|
||||
while (--i >= 0)
|
||||
sysfs_remove_group(kobj, groups[i]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return error;
|
||||
return internal_create_groups(kobj, 0, groups);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sysfs_create_groups);
|
||||
|
||||
/**
 * sysfs_update_groups - given a directory kobject, update a bunch of attribute groups
 * @kobj: The kobject to update the groups on
 * @groups: The attribute groups to update, NULL terminated
 *
 * This function updates a bunch of attribute groups. If an error occurs when
 * updating a group, all previously updated groups will be removed together
 * with already existing (not updated) attributes.
 *
 * Returns 0 on success or error code from sysfs_update_group on failure.
 */
int sysfs_update_groups(struct kobject *kobj,
			const struct attribute_group **groups)
{
	return internal_create_groups(kobj, 1, groups);
}
EXPORT_SYMBOL_GPL(sysfs_update_groups);
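A sketch of the intended call pattern (it mirrors the pmu_dev_alloc() hunk in kernel/events/core.c later in this diff): the same NULL-terminated group list is created once and then re-evaluated so that changed ->is_visible() results take effect; the wrapper name is illustrative only.

/* Illustrative wrapper; both calls are the sysfs APIs shown above. */
static int example_refresh_groups(struct kobject *kobj,
				  const struct attribute_group **groups)
{
	int ret = sysfs_create_groups(kobj, groups);	/* initial creation */

	if (ret)
		return ret;

	/* ... probing may flip each group's ->is_visible() callback ... */

	return sysfs_update_groups(kobj, groups);	/* refresh visibility */
}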
/**
|
||||
* sysfs_update_group - given a directory kobject, update an attribute group
|
||||
* @kobj: The kobject to update the group on
|
||||
|
@ -256,6 +256,7 @@ struct pmu {
|
||||
struct module *module;
|
||||
struct device *dev;
|
||||
const struct attribute_group **attr_groups;
|
||||
const struct attribute_group **attr_update;
|
||||
const char *name;
|
||||
int type;
|
||||
|
||||
@ -749,6 +750,11 @@ struct perf_event_context {
|
||||
int nr_stat;
|
||||
int nr_freq;
|
||||
int rotate_disable;
|
||||
/*
|
||||
* Set when nr_events != nr_active, except tolerant to events not
|
||||
* necessary to be active due to scheduling constraints, such as cgroups.
|
||||
*/
|
||||
int rotate_necessary;
|
||||
refcount_t refcount;
|
||||
struct task_struct *task;
|
||||
|
||||
|
@ -268,6 +268,8 @@ int __must_check sysfs_create_group(struct kobject *kobj,
|
||||
const struct attribute_group *grp);
|
||||
int __must_check sysfs_create_groups(struct kobject *kobj,
|
||||
const struct attribute_group **groups);
|
||||
int __must_check sysfs_update_groups(struct kobject *kobj,
|
||||
const struct attribute_group **groups);
|
||||
int sysfs_update_group(struct kobject *kobj,
|
||||
const struct attribute_group *grp);
|
||||
void sysfs_remove_group(struct kobject *kobj,
|
||||
@ -433,6 +435,12 @@ static inline int sysfs_create_groups(struct kobject *kobj,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int sysfs_update_groups(struct kobject *kobj,
|
||||
const struct attribute_group **groups)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int sysfs_update_group(struct kobject *kobj,
|
||||
const struct attribute_group *grp)
|
||||
{
|
||||
|
@ -2952,6 +2952,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
|
||||
if (!ctx->nr_active || !(is_active & EVENT_ALL))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If we had been multiplexing, no rotations are necessary, now no events
|
||||
* are active.
|
||||
*/
|
||||
ctx->rotate_necessary = 0;
|
||||
|
||||
perf_pmu_disable(ctx->pmu);
|
||||
if (is_active & EVENT_PINNED) {
|
||||
list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
|
||||
@ -3319,10 +3325,13 @@ static int flexible_sched_in(struct perf_event *event, void *data)
|
||||
return 0;
|
||||
|
||||
if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
|
||||
if (!group_sched_in(event, sid->cpuctx, sid->ctx))
|
||||
list_add_tail(&event->active_list, &sid->ctx->flexible_active);
|
||||
else
|
||||
int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
|
||||
if (ret) {
|
||||
sid->can_add_hw = 0;
|
||||
sid->ctx->rotate_necessary = 1;
|
||||
return 0;
|
||||
}
|
||||
list_add_tail(&event->active_list, &sid->ctx->flexible_active);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -3690,24 +3699,17 @@ ctx_first_active(struct perf_event_context *ctx)
|
||||
static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
|
||||
{
|
||||
struct perf_event *cpu_event = NULL, *task_event = NULL;
|
||||
bool cpu_rotate = false, task_rotate = false;
|
||||
struct perf_event_context *ctx = NULL;
|
||||
struct perf_event_context *task_ctx = NULL;
|
||||
int cpu_rotate, task_rotate;
|
||||
|
||||
/*
|
||||
* Since we run this from IRQ context, nobody can install new
|
||||
* events, thus the event count values are stable.
|
||||
*/
|
||||
|
||||
if (cpuctx->ctx.nr_events) {
|
||||
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
|
||||
cpu_rotate = true;
|
||||
}
|
||||
|
||||
ctx = cpuctx->task_ctx;
|
||||
if (ctx && ctx->nr_events) {
|
||||
if (ctx->nr_events != ctx->nr_active)
|
||||
task_rotate = true;
|
||||
}
|
||||
cpu_rotate = cpuctx->ctx.rotate_necessary;
|
||||
task_ctx = cpuctx->task_ctx;
|
||||
task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;
|
||||
|
||||
if (!(cpu_rotate || task_rotate))
|
||||
return false;
|
||||
@ -3716,7 +3718,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
|
||||
perf_pmu_disable(cpuctx->ctx.pmu);
|
||||
|
||||
if (task_rotate)
|
||||
task_event = ctx_first_active(ctx);
|
||||
task_event = ctx_first_active(task_ctx);
|
||||
if (cpu_rotate)
|
||||
cpu_event = ctx_first_active(&cpuctx->ctx);
|
||||
|
||||
@ -3724,17 +3726,17 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
|
||||
* As per the order given at ctx_resched() first 'pop' task flexible
|
||||
* and then, if needed CPU flexible.
|
||||
*/
|
||||
if (task_event || (ctx && cpu_event))
|
||||
ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
|
||||
if (task_event || (task_ctx && cpu_event))
|
||||
ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
|
||||
if (cpu_event)
|
||||
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
|
||||
|
||||
if (task_event)
|
||||
rotate_ctx(ctx, task_event);
|
||||
rotate_ctx(task_ctx, task_event);
|
||||
if (cpu_event)
|
||||
rotate_ctx(&cpuctx->ctx, cpu_event);
|
||||
|
||||
perf_event_sched_in(cpuctx, ctx, current);
|
||||
perf_event_sched_in(cpuctx, task_ctx, current);
|
||||
|
||||
perf_pmu_enable(cpuctx->ctx.pmu);
|
||||
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
|
||||
@ -8535,9 +8537,9 @@ static int perf_tp_event_match(struct perf_event *event,
|
||||
if (event->hw.state & PERF_HES_STOPPED)
|
||||
return 0;
|
||||
/*
|
||||
* All tracepoints are from kernel-space.
|
||||
* If exclude_kernel, only trace user-space tracepoints (uprobes)
|
||||
*/
|
||||
if (event->attr.exclude_kernel)
|
||||
if (event->attr.exclude_kernel && !user_mode(regs))
|
||||
return 0;
|
||||
|
||||
if (!perf_tp_filter_match(event, data))
|
||||
@ -9877,6 +9879,12 @@ static int pmu_dev_alloc(struct pmu *pmu)
|
||||
if (ret)
|
||||
goto del_dev;
|
||||
|
||||
if (pmu->attr_update)
|
||||
ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
|
||||
|
||||
if (ret)
|
||||
goto del_dev;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
|
||||
|
@ -1336,7 +1336,7 @@ static inline void init_trace_event_call(struct trace_uprobe *tu,
|
||||
call->event.funcs = &uprobe_funcs;
|
||||
call->class->define_fields = uprobe_event_define_fields;
|
||||
|
||||
call->flags = TRACE_EVENT_FL_UPROBE;
|
||||
call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
|
||||
call->class->reg = trace_uprobe_register;
|
||||
call->data = tu;
|
||||
}
|
||||
|
@ -260,6 +260,13 @@ struct kvm_vcpu_events {
|
||||
KVM_REG_SIZE_U256 | \
|
||||
((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
|
||||
|
||||
/*
|
||||
* Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and
|
||||
* KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness-
|
||||
* invariant layout which differs from the layout used for the FPSIMD
|
||||
* V-registers on big-endian systems: see sigcontext.h for more explanation.
|
||||
*/
|
||||
|
||||
#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
|
||||
#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
|
||||
|
||||
|
@ -239,12 +239,14 @@
|
||||
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
|
||||
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
|
||||
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
|
||||
#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
|
||||
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
|
||||
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
|
||||
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
|
||||
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
|
||||
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
|
||||
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
|
||||
#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
|
||||
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
|
||||
#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
|
||||
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
|
||||
@ -269,13 +271,19 @@
|
||||
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
|
||||
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
|
||||
|
||||
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
|
||||
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
|
||||
/*
|
||||
* Extended auxiliary flags: Linux defined - for features scattered in various
|
||||
* CPUID levels like 0xf, etc.
|
||||
*
|
||||
* Reuse free bits when adding new feature flags!
|
||||
*/
|
||||
#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
|
||||
#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
|
||||
|
||||
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
|
||||
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
|
||||
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
|
||||
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
|
||||
|
||||
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
|
||||
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
|
||||
@ -322,6 +330,7 @@
|
||||
#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
|
||||
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
|
||||
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
|
||||
#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */
|
||||
#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
|
||||
#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
|
||||
#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
|
||||
|
@ -383,6 +383,9 @@ struct kvm_sync_regs {
|
||||
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
|
||||
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
|
||||
|
||||
#define KVM_STATE_NESTED_FORMAT_VMX 0
|
||||
#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */
|
||||
|
||||
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
|
||||
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
|
||||
#define KVM_STATE_NESTED_EVMCS 0x00000004
|
||||
@ -390,7 +393,14 @@ struct kvm_sync_regs {
|
||||
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
|
||||
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
|
||||
|
||||
struct kvm_vmx_nested_state {
|
||||
#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
|
||||
|
||||
struct kvm_vmx_nested_state_data {
|
||||
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
|
||||
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
|
||||
};
|
||||
|
||||
struct kvm_vmx_nested_state_hdr {
|
||||
__u64 vmxon_pa;
|
||||
__u64 vmcs12_pa;
|
||||
|
||||
@ -401,24 +411,25 @@ struct kvm_vmx_nested_state {
|
||||
|
||||
/* for KVM_CAP_NESTED_STATE */
|
||||
struct kvm_nested_state {
|
||||
/* KVM_STATE_* flags */
|
||||
__u16 flags;
|
||||
|
||||
/* 0 for VMX, 1 for SVM. */
|
||||
__u16 format;
|
||||
|
||||
/* 128 for SVM, 128 + VMCS size for VMX. */
|
||||
__u32 size;
|
||||
|
||||
union {
|
||||
/* VMXON, VMCS */
|
||||
struct kvm_vmx_nested_state vmx;
|
||||
struct kvm_vmx_nested_state_hdr vmx;
|
||||
|
||||
/* Pad the header to 128 bytes. */
|
||||
__u8 pad[120];
|
||||
};
|
||||
} hdr;
|
||||
|
||||
__u8 data[0];
|
||||
/*
|
||||
* Define data region as 0 bytes to preserve backwards-compatability
|
||||
* to old definition of kvm_nested_state in order to avoid changing
|
||||
* KVM_{GET,PUT}_NESTED_STATE ioctl values.
|
||||
*/
|
||||
union {
|
||||
struct kvm_vmx_nested_state_data vmx[0];
|
||||
} data;
|
||||
};
|
||||
|
||||
#endif /* _ASM_X86_KVM_H */
|
||||
|
@ -36,6 +36,7 @@ FEATURE_TESTS_BASIC := \
|
||||
fortify-source \
|
||||
sync-compare-and-swap \
|
||||
get_current_dir_name \
|
||||
gettid \
|
||||
glibc \
|
||||
gtk2 \
|
||||
gtk2-infobar \
|
||||
@ -52,6 +53,7 @@ FEATURE_TESTS_BASIC := \
|
||||
libpython \
|
||||
libpython-version \
|
||||
libslang \
|
||||
libslang-include-subdir \
|
||||
libcrypto \
|
||||
libunwind \
|
||||
pthread-attr-setaffinity-np \
|
||||
@ -113,7 +115,6 @@ FEATURE_DISPLAY ?= \
|
||||
numa_num_possible_cpus \
|
||||
libperl \
|
||||
libpython \
|
||||
libslang \
|
||||
libcrypto \
|
||||
libunwind \
|
||||
libdw-dwarf-unwind \
|
||||
|
@ -31,6 +31,7 @@ FILES= \
|
||||
test-libpython.bin \
|
||||
test-libpython-version.bin \
|
||||
test-libslang.bin \
|
||||
test-libslang-include-subdir.bin \
|
||||
test-libcrypto.bin \
|
||||
test-libunwind.bin \
|
||||
test-libunwind-debug-frame.bin \
|
||||
@ -54,6 +55,7 @@ FILES= \
|
||||
test-get_cpuid.bin \
|
||||
test-sdt.bin \
|
||||
test-cxx.bin \
|
||||
test-gettid.bin \
|
||||
test-jvmti.bin \
|
||||
test-jvmti-cmlr.bin \
|
||||
test-sched_getcpu.bin \
|
||||
@ -181,7 +183,10 @@ $(OUTPUT)test-libaudit.bin:
|
||||
$(BUILD) -laudit
|
||||
|
||||
$(OUTPUT)test-libslang.bin:
|
||||
$(BUILD) -I/usr/include/slang -lslang
|
||||
$(BUILD) -lslang
|
||||
|
||||
$(OUTPUT)test-libslang-include-subdir.bin:
|
||||
$(BUILD) -lslang
|
||||
|
||||
$(OUTPUT)test-libcrypto.bin:
|
||||
$(BUILD) -lcrypto
|
||||
@ -267,6 +272,9 @@ $(OUTPUT)test-sdt.bin:
|
||||
$(OUTPUT)test-cxx.bin:
|
||||
$(BUILDXX) -std=gnu++11
|
||||
|
||||
$(OUTPUT)test-gettid.bin:
|
||||
$(BUILD)
|
||||
|
||||
$(OUTPUT)test-jvmti.bin:
|
||||
$(BUILD)
|
||||
|
||||
|
@ -38,6 +38,10 @@
|
||||
# include "test-get_current_dir_name.c"
|
||||
#undef main
|
||||
|
||||
#define main main_test_gettid
|
||||
# include "test-gettid.c"
|
||||
#undef main
|
||||
|
||||
#define main main_test_glibc
|
||||
# include "test-glibc.c"
|
||||
#undef main
|
||||
@ -182,7 +186,7 @@
|
||||
# include "test-disassembler-four-args.c"
|
||||
#undef main
|
||||
|
||||
#define main main_test_zstd
|
||||
#define main main_test_libzstd
|
||||
# include "test-libzstd.c"
|
||||
#undef main
|
||||
|
||||
@ -195,6 +199,7 @@ int main(int argc, char *argv[])
|
||||
main_test_libelf();
|
||||
main_test_libelf_mmap();
|
||||
main_test_get_current_dir_name();
|
||||
main_test_gettid();
|
||||
main_test_glibc();
|
||||
main_test_dwarf();
|
||||
main_test_dwarf_getlocations();
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <stdio.h>
|
||||
|
||||
int main(void)
|
||||
|
11  tools/build/feature/test-gettid.c  Normal file
@@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
#define _GNU_SOURCE
#include <unistd.h>

int main(void)
{
        return gettid();
}

#undef _GNU_SOURCE

@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <stdio.h>
|
||||
|
||||
int main(void)
|
||||
|
7  tools/build/feature/test-libslang-include-subdir.c  Normal file
@@ -0,0 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <slang/slang.h>

int main(void)
{
        return SLsmg_init_smg();
}

@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#define _GNU_SOURCE
|
||||
#include <sched.h>
|
||||
|
||||
|
75  tools/include/linux/ctype.h  Normal file
@@ -0,0 +1,75 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _LINUX_CTYPE_H
|
||||
#define _LINUX_CTYPE_H
|
||||
|
||||
/*
|
||||
* NOTE! This ctype does not handle EOF like the standard C
|
||||
* library is required to.
|
||||
*/
|
||||
|
||||
#define _U 0x01 /* upper */
|
||||
#define _L 0x02 /* lower */
|
||||
#define _D 0x04 /* digit */
|
||||
#define _C 0x08 /* cntrl */
|
||||
#define _P 0x10 /* punct */
|
||||
#define _S 0x20 /* white space (space/lf/tab) */
|
||||
#define _X 0x40 /* hex digit */
|
||||
#define _SP 0x80 /* hard space (0x20) */
|
||||
|
||||
extern const unsigned char _ctype[];
|
||||
|
||||
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
|
||||
|
||||
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
|
||||
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
|
||||
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
|
||||
static inline int __isdigit(int c)
|
||||
{
|
||||
return '0' <= c && c <= '9';
|
||||
}
|
||||
#define isdigit(c) __isdigit(c)
|
||||
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
|
||||
#define islower(c) ((__ismask(c)&(_L)) != 0)
|
||||
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
|
||||
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
|
||||
/* Note: isspace() must return false for %NUL-terminator */
|
||||
#define isspace(c) ((__ismask(c)&(_S)) != 0)
|
||||
#define isupper(c) ((__ismask(c)&(_U)) != 0)
|
||||
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
|
||||
|
||||
#define isascii(c) (((unsigned char)(c))<=0x7f)
|
||||
#define toascii(c) (((unsigned char)(c))&0x7f)
|
||||
|
||||
static inline unsigned char __tolower(unsigned char c)
|
||||
{
|
||||
if (isupper(c))
|
||||
c -= 'A'-'a';
|
||||
return c;
|
||||
}
|
||||
|
||||
static inline unsigned char __toupper(unsigned char c)
|
||||
{
|
||||
if (islower(c))
|
||||
c -= 'a'-'A';
|
||||
return c;
|
||||
}
|
||||
|
||||
#define tolower(c) __tolower(c)
|
||||
#define toupper(c) __toupper(c)
|
||||
|
||||
/*
|
||||
* Fast implementation of tolower() for internal usage. Do not use in your
|
||||
* code.
|
||||
*/
|
||||
static inline char _tolower(const char c)
|
||||
{
|
||||
return c | 0x20;
|
||||
}
|
||||
|
||||
/* Fast check for octal digit */
|
||||
static inline int isodigit(const char c)
|
||||
{
|
||||
return c >= '0' && c <= '7';
|
||||
}
|
||||
|
||||
#endif
|
@ -102,6 +102,7 @@
|
||||
|
||||
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
|
||||
int scnprintf(char * buf, size_t size, const char * fmt, ...);
|
||||
int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
|
||||
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
|
||||
|
||||
|
@ -7,6 +7,9 @@
|
||||
|
||||
void *memdup(const void *src, size_t len);
|
||||
|
||||
char **argv_split(const char *str, int *argcp);
|
||||
void argv_free(char **argv);
|
||||
|
||||
int strtobool(const char *s, bool *res);
|
||||
|
||||
/*
|
||||
@ -19,6 +22,8 @@ extern size_t strlcpy(char *dest, const char *src, size_t size);
|
||||
|
||||
char *str_error_r(int errnum, char *buf, size_t buflen);
|
||||
|
||||
char *strreplace(char *s, char old, char new);
|
||||
|
||||
/**
|
||||
* strstarts - does @str start with @prefix?
|
||||
* @str: string to examine
|
||||
@ -29,4 +34,8 @@ static inline bool strstarts(const char *str, const char *prefix)
|
||||
return strncmp(str, prefix, strlen(prefix)) == 0;
|
||||
}
|
||||
|
||||
#endif /* _LINUX_STRING_H_ */
|
||||
extern char * __must_check skip_spaces(const char *);
|
||||
|
||||
extern char *strim(char *);
|
||||
|
||||
#endif /* _TOOLS_LINUX_STRING_H_ */
|
||||
|
100  tools/lib/argv_split.c  Normal file
@@ -0,0 +1,100 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Helper function for splitting a string into an argv-like array.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
static const char *skip_arg(const char *cp)
|
||||
{
|
||||
while (*cp && !isspace(*cp))
|
||||
cp++;
|
||||
|
||||
return cp;
|
||||
}
|
||||
|
||||
static int count_argc(const char *str)
|
||||
{
|
||||
int count = 0;
|
||||
|
||||
while (*str) {
|
||||
str = skip_spaces(str);
|
||||
if (*str) {
|
||||
count++;
|
||||
str = skip_arg(str);
|
||||
}
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
/**
|
||||
* argv_free - free an argv
|
||||
* @argv - the argument vector to be freed
|
||||
*
|
||||
* Frees an argv and the strings it points to.
|
||||
*/
|
||||
void argv_free(char **argv)
|
||||
{
|
||||
char **p;
|
||||
for (p = argv; *p; p++) {
|
||||
free(*p);
|
||||
*p = NULL;
|
||||
}
|
||||
|
||||
free(argv);
|
||||
}
|
||||
|
||||
/**
|
||||
* argv_split - split a string at whitespace, returning an argv
|
||||
* @str: the string to be split
|
||||
* @argcp: returned argument count
|
||||
*
|
||||
* Returns an array of pointers to strings which are split out from
|
||||
* @str. This is performed by strictly splitting on white-space; no
|
||||
* quote processing is performed. Multiple whitespace characters are
|
||||
* considered to be a single argument separator. The returned array
|
||||
* is always NULL-terminated. Returns NULL on memory allocation
|
||||
* failure.
|
||||
*/
|
||||
char **argv_split(const char *str, int *argcp)
|
||||
{
|
||||
int argc = count_argc(str);
|
||||
char **argv = calloc(argc + 1, sizeof(*argv));
|
||||
char **argvp;
|
||||
|
||||
if (argv == NULL)
|
||||
goto out;
|
||||
|
||||
if (argcp)
|
||||
*argcp = argc;
|
||||
|
||||
argvp = argv;
|
||||
|
||||
while (*str) {
|
||||
str = skip_spaces(str);
|
||||
|
||||
if (*str) {
|
||||
const char *p = str;
|
||||
char *t;
|
||||
|
||||
str = skip_arg(str);
|
||||
|
||||
t = strndup(p, str-p);
|
||||
if (t == NULL)
|
||||
goto fail;
|
||||
*argvp++ = t;
|
||||
}
|
||||
}
|
||||
*argvp = NULL;
|
||||
|
||||
out:
|
||||
return argv;
|
||||
|
||||
fail:
|
||||
argv_free(argv);
|
||||
return NULL;
|
||||
}
|
35  tools/lib/ctype.c  Normal file
@@ -0,0 +1,35 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* linux/lib/ctype.c
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
*/
|
||||
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
const unsigned char _ctype[] = {
|
||||
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
|
||||
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
|
||||
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
|
||||
_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
|
||||
_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
|
||||
_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
|
||||
_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
|
||||
_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
|
||||
_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
|
||||
_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
|
||||
_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
|
||||
_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
|
||||
_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
|
||||
_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
|
||||
_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
|
||||
_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
|
||||
_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
|
||||
_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
|
||||
_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
|
||||
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
|
||||
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
|
||||
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
|
@ -17,6 +17,7 @@
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/**
|
||||
@ -106,3 +107,57 @@ size_t __weak strlcpy(char *dest, const char *src, size_t size)
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* skip_spaces - Removes leading whitespace from @str.
|
||||
* @str: The string to be stripped.
|
||||
*
|
||||
* Returns a pointer to the first non-whitespace character in @str.
|
||||
*/
|
||||
char *skip_spaces(const char *str)
|
||||
{
|
||||
while (isspace(*str))
|
||||
++str;
|
||||
return (char *)str;
|
||||
}
|
||||
|
||||
/**
|
||||
* strim - Removes leading and trailing whitespace from @s.
|
||||
* @s: The string to be stripped.
|
||||
*
|
||||
* Note that the first trailing whitespace is replaced with a %NUL-terminator
|
||||
* in the given string @s. Returns a pointer to the first non-whitespace
|
||||
* character in @s.
|
||||
*/
|
||||
char *strim(char *s)
|
||||
{
|
||||
size_t size;
|
||||
char *end;
|
||||
|
||||
size = strlen(s);
|
||||
if (!size)
|
||||
return s;
|
||||
|
||||
end = s + size - 1;
|
||||
while (end >= s && isspace(*end))
|
||||
end--;
|
||||
*(end + 1) = '\0';
|
||||
|
||||
return skip_spaces(s);
|
||||
}
|
||||
|
||||
/**
|
||||
* strreplace - Replace all occurrences of character in string.
|
||||
* @s: The string to operate on.
|
||||
* @old: The character being replaced.
|
||||
* @new: The character @old is replaced with.
|
||||
*
|
||||
* Returns pointer to the nul byte at the end of @s.
|
||||
*/
|
||||
char *strreplace(char *s, char old, char new)
|
||||
{
|
||||
for (; *s; ++s)
|
||||
if (*s == old)
|
||||
*s = new;
|
||||
return s;
|
||||
}
|
||||
|
@ -1,5 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <ctype.h>
|
||||
#include "symbol/kallsyms.h"
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
@ -16,6 +15,19 @@ bool kallsyms__is_function(char symbol_type)
|
||||
return symbol_type == 'T' || symbol_type == 'W';
|
||||
}
|
||||
|
||||
/*
|
||||
* While we find nice hex chars, build a long_val.
|
||||
* Return number of chars processed.
|
||||
*/
|
||||
int hex2u64(const char *ptr, u64 *long_val)
|
||||
{
|
||||
char *p;
|
||||
|
||||
*long_val = strtoull(ptr, &p, 16);
|
||||
|
||||
return p - ptr;
|
||||
}
|
||||
|
||||
int kallsyms__parse(const char *filename, void *arg,
|
||||
int (*process_symbol)(void *arg, const char *name,
|
||||
char type, u64 start))
|
||||
|
@ -18,6 +18,8 @@ static inline u8 kallsyms2elf_binding(char type)
|
||||
return isupper(type) ? STB_GLOBAL : STB_LOCAL;
|
||||
}
|
||||
|
||||
int hex2u64(const char *ptr, u64 *long_val);
|
||||
|
||||
u8 kallsyms2elf_type(char type);
|
||||
|
||||
bool kallsyms__is_function(char symbol_type);
|
||||
|
@ -23,3 +23,22 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...)
|
||||
|
||||
return (i >= ssize) ? (ssize - 1) : i;
|
||||
}
|
||||
|
||||
int scnprintf_pad(char * buf, size_t size, const char * fmt, ...)
|
||||
{
|
||||
ssize_t ssize = size;
|
||||
va_list args;
|
||||
int i;
|
||||
|
||||
va_start(args, fmt);
|
||||
i = vscnprintf(buf, size, fmt, args);
|
||||
va_end(args);
|
||||
|
||||
if (i < (int) size) {
|
||||
for (; i < (int) size; i++)
|
||||
buf[i] = ' ';
|
||||
buf[i] = 0x0;
|
||||
}
|
||||
|
||||
return (i >= ssize) ? (ssize - 1) : i;
|
||||
}
|
||||
|
@ -9,6 +9,7 @@ objtool-y += special.o
|
||||
objtool-y += objtool.o
|
||||
|
||||
objtool-y += libstring.o
|
||||
objtool-y += libctype.o
|
||||
objtool-y += str_error_r.o
|
||||
|
||||
CFLAGS += -I$(srctree)/tools/lib
|
||||
@ -17,6 +18,10 @@ $(OUTPUT)libstring.o: ../lib/string.c FORCE
|
||||
$(call rule_mkdir)
|
||||
$(call if_changed_dep,cc_o_c)
|
||||
|
||||
$(OUTPUT)libctype.o: ../lib/ctype.c FORCE
|
||||
$(call rule_mkdir)
|
||||
$(call if_changed_dep,cc_o_c)
|
||||
|
||||
$(OUTPUT)str_error_r.o: ../lib/str_error_r.c FORCE
|
||||
$(call rule_mkdir)
|
||||
$(call if_changed_dep,cc_o_c)
|
||||
|
41  tools/perf/Documentation/db-export.txt  Normal file
@@ -0,0 +1,41 @@
Database Export
===============

perf tool's python scripting engine:

	tools/perf/util/scripting-engines/trace-event-python.c

supports scripts:

	tools/perf/scripts/python/export-to-sqlite.py
	tools/perf/scripts/python/export-to-postgresql.py

which export data to a SQLite3 or PostgreSQL database.

The export process provides records with unique sequential ids which allows the
data to be imported directly to a database and provides the relationships
between tables.

Over time it is possible to continue to expand the export while maintaining
backward and forward compatibility, by following some simple rules:

1. Because of the nature of SQL, existing tables and columns can continue to be
used so long as the names and meanings (and to some extent data types) remain
the same.

2. New tables and columns can be added, without affecting existing SQL queries,
so long as the new names are unique.

3. Scripts that use a database (e.g. exported-sql-viewer.py) can maintain
backward compatibility by testing for the presence of new tables and columns
before using them. e.g. function IsSelectable() in exported-sql-viewer.py

4. The export scripts themselves maintain forward compatibility (i.e. an existing
script will continue to work with new versions of perf) by accepting a variable
number of arguments (e.g. def call_return_table(*x)) i.e. perf can pass more
arguments which old scripts will ignore.

5. The scripting engine tests for the existence of script handler functions
before calling them. The scripting engine can also test for the support of new
or optional features by checking for the existence and value of script global
variables.
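
A minimal sketch of the pattern in rules 4 and 5, assuming a hypothetical script
(only call_return_table() is named above; the perf_db_export_mode support flag
and the body of the handler are assumptions for illustration, not perf code):

    # Rule 5: a module-level global the scripting engine can test to learn
    # what this script supports (flag name assumed here).
    perf_db_export_mode = True

    # Rule 4: accept a variable number of arguments so newer perf versions
    # can pass extra columns which this script simply ignores.
    def call_return_table(*x):
        print("call_return_table called with %d arguments" % len(x))
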
@@ -88,21 +88,51 @@ smaller.

To represent software control flow, "branches" samples are produced. By default
a branch sample is synthesized for every single branch. To get an idea what
data is available you can use the 'perf script' tool with no parameters, which
will list all the samples.
data is available you can use the 'perf script' tool with all itrace sampling
options, which will list all the samples.

	perf record -e intel_pt//u ls
	perf script
	perf script --itrace=ibxwpe

An interesting field that is not printed by default is 'flags' which can be
displayed as follows:

	perf script -Fcomm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr,symoff,flags
	perf script --itrace=ibxwpe -F+flags

The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
in transaction, respectively.

Another interesting field that is not printed by default is 'ipc' which can be
displayed as follows:

	perf script --itrace=be -F+ipc

There are two ways that instructions-per-cycle (IPC) can be calculated depending
on the recording.

If the 'cyc' config term (see config terms section below) was used, then IPC is
calculated using the cycle count from CYC packets, otherwise MTC packets are
used - refer to the 'mtc' config term. When MTC is used, however, the values
are less accurate because the timing is less accurate.

Because Intel PT does not update the cycle count on every branch or instruction,
the values will often be zero. When there are values, they will be the number
of instructions and number of cycles since the last update, and thus represent
the average IPC since the last IPC for that event type. Note IPC for "branches"
events is calculated separately from IPC for "instructions" events.

Also note that the IPC instruction count may or may not include the current
instruction. If the cycle count is associated with an asynchronous branch
(e.g. page fault or interrupt), then the instruction count does not include the
current instruction, otherwise it does. That is consistent with whether or not
that instruction has retired when the cycle count is updated.

Another note, in the case of "branches" events, non-taken branches are not
presently sampled, so IPC values for them do not appear e.g. a CYC packet with a
TNT packet that starts with a non-taken branch. To see every possible IPC
value, "instructions" events can be used e.g. --itrace=i0ns

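A minimal sketch of the IPC arithmetic described above, assuming the decoder
exposes per-sample instruction and cycle counts accumulated since the last
update (the names are illustrative, not perf code):

    # Average IPC since the last update; a zero cycle count means no new
    # CYC/MTC update was seen for this sample, so no IPC is reported.
    def sample_ipc(insn_cnt, cyc_cnt):
        if cyc_cnt == 0:
            return 0.0
        return float(insn_cnt) / cyc_cnt

    print(sample_ipc(1200, 400))   # -> 3.0
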
While it is possible to create scripts to analyze the data, an alternative
approach is available to export the data to a sqlite or postgresql database.
Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,

@@ -713,7 +743,7 @@ Having no option is the same as

which, in turn, is the same as

	--itrace=ibxwpe
	--itrace=cepwx

The letters are:

@ -564,9 +564,12 @@ llvm.*::
|
||||
llvm.clang-bpf-cmd-template::
|
||||
Cmdline template. Below lines show its default value. Environment
|
||||
variable is used to pass options.
|
||||
"$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS $KERNEL_INC_OPTIONS \
|
||||
-Wno-unused-value -Wno-pointer-sign -working-directory \
|
||||
$WORKING_DIR -c $CLANG_SOURCE -target bpf -O2 -o -"
|
||||
"$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
|
||||
"-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
|
||||
"$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
|
||||
"-Wno-unused-value -Wno-pointer-sign " \
|
||||
"-working-directory $WORKING_DIR " \
|
||||
"-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
|
||||
|
||||
llvm.clang-opt::
|
||||
Options passed to clang.
|
||||
|
@ -90,9 +90,10 @@ OPTIONS
|
||||
|
||||
-c::
|
||||
--compute::
|
||||
Differential computation selection - delta, ratio, wdiff, delta-abs
|
||||
(default is delta-abs). Default can be changed using diff.compute
|
||||
config option. See COMPARISON METHODS section for more info.
|
||||
Differential computation selection - delta, ratio, wdiff, cycles,
|
||||
delta-abs (default is delta-abs). Default can be changed using
|
||||
diff.compute config option. See COMPARISON METHODS section for
|
||||
more info.
|
||||
|
||||
-p::
|
||||
--period::
|
||||
@ -142,12 +143,14 @@ OPTIONS
|
||||
perf diff --time 0%-10%,30%-40%
|
||||
|
||||
It also supports analyzing samples within a given time window
|
||||
<start>,<stop>. Times have the format seconds.microseconds. If 'start'
|
||||
is not given (i.e., time string is ',x.y') then analysis starts at
|
||||
the beginning of the file. If stop time is not given (i.e, time
|
||||
string is 'x.y,') then analysis goes to the end of the file. Time string is
|
||||
'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps for different
|
||||
perf.data files.
|
||||
<start>,<stop>. Times have the format seconds.nanoseconds. If 'start'
|
||||
is not given (i.e. time string is ',x.y') then analysis starts at
|
||||
the beginning of the file. If stop time is not given (i.e. time
|
||||
string is 'x.y,') then analysis goes to the end of the file.
|
||||
Multiple ranges can be separated by spaces, which requires the argument
|
||||
to be quoted e.g. --time "1234.567,1234.789 1235,"
|
||||
Time string is'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps
|
||||
for different perf.data files.
|
||||
|
||||
For example, we get the timestamp information from 'perf script'.
|
||||
|
||||
@ -278,6 +281,16 @@ If specified the 'Weighted diff' column is displayed with value 'd' computed as:
|
||||
- WEIGHT-A being the weight of the data file
|
||||
- WEIGHT-B being the weight of the baseline data file
|
||||
|
||||
cycles
|
||||
~~~~~~
|
||||
If specified the '[Program Block Range] Cycles Diff' column is displayed.
|
||||
It displays the cycles difference of same program basic block amongst
|
||||
two perf.data. The program basic block is the code between two branches.
|
||||
|
||||
'[Program Block Range]' indicates the range of a program basic block.
|
||||
Source line is reported if it can be found otherwise uses symbol+offset
|
||||
instead.
|
||||
|
||||
SEE ALSO
|
||||
--------
|
||||
linkperf:perf-record[1], linkperf:perf-report[1]
|
||||
|
@ -490,6 +490,17 @@ Configure all used events to run in kernel space.
|
||||
--all-user::
|
||||
Configure all used events to run in user space.
|
||||
|
||||
--kernel-callchains::
|
||||
Collect callchains only from kernel space. I.e. this option sets
|
||||
perf_event_attr.exclude_callchain_user to 1.
|
||||
|
||||
--user-callchains::
|
||||
Collect callchains only from user space. I.e. this option sets
|
||||
perf_event_attr.exclude_callchain_kernel to 1.
|
||||
|
||||
Don't use both --kernel-callchains and --user-callchains at the same time or no
|
||||
callchains will be collected.
|
||||
|
||||
--timestamp-filename
|
||||
Append timestamp to output file name.
|
||||
|
||||
|
@ -89,7 +89,7 @@ OPTIONS
|
||||
- socket: processor socket number the task ran at the time of sample
|
||||
- srcline: filename and line number executed at the time of sample. The
|
||||
DWARF debugging info must be provided.
|
||||
- srcfile: file name of the source file of the same. Requires dwarf
|
||||
- srcfile: file name of the source file of the samples. Requires dwarf
|
||||
information.
|
||||
- weight: Event specific weight, e.g. memory latency or transaction
|
||||
abort cost. This is the global weight.
|
||||
@ -412,12 +412,13 @@ OPTIONS
|
||||
|
||||
--time::
|
||||
Only analyze samples within given time window: <start>,<stop>. Times
|
||||
have the format seconds.microseconds. If start is not given (i.e., time
|
||||
have the format seconds.nanoseconds. If start is not given (i.e. time
|
||||
string is ',x.y') then analysis starts at the beginning of the file. If
|
||||
stop time is not given (i.e, time string is 'x.y,') then analysis goes
|
||||
to end of file.
|
||||
stop time is not given (i.e. time string is 'x.y,') then analysis goes
|
||||
to end of file. Multiple ranges can be separated by spaces, which
|
||||
requires the argument to be quoted e.g. --time "1234.567,1234.789 1235,"
|
||||
|
||||
Also support time percent with multiple time range. Time string is
|
||||
Also support time percent with multiple time ranges. Time string is
|
||||
'a%/n,b%/m,...' or 'a%-b%,c%-%d,...'.
|
||||
|
||||
For example:
|
||||
|
@ -117,7 +117,7 @@ OPTIONS
|
||||
Comma separated list of fields to print. Options are:
|
||||
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
|
||||
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
|
||||
brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode.
|
||||
brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode, ipc.
|
||||
Field list can be prepended with the type, trace, sw or hw,
|
||||
to indicate to which event type the field list applies.
|
||||
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
|
||||
@ -203,6 +203,9 @@ OPTIONS
|
||||
The synth field is used by synthesized events which may be created when
|
||||
Instruction Trace decoding.
|
||||
|
||||
The ipc (instructions per cycle) field is synthesized and may have a value when
|
||||
Instruction Trace decoding.
|
||||
|
||||
Finally, a user may not set fields to none for all event types.
|
||||
i.e., -F "" is not allowed.
|
||||
|
||||
@ -313,6 +316,9 @@ OPTIONS
|
||||
--show-round-events
|
||||
Display finished round events i.e. events of type PERF_RECORD_FINISHED_ROUND.
|
||||
|
||||
--show-bpf-events
|
||||
Display bpf events i.e. events of type PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT.
|
||||
|
||||
--demangle::
|
||||
Demangle symbol names to human readable form. It's enabled by default,
|
||||
disable with --no-demangle.
|
||||
@ -355,12 +361,13 @@ include::itrace.txt[]
|
||||
|
||||
--time::
|
||||
Only analyze samples within given time window: <start>,<stop>. Times
|
||||
have the format seconds.microseconds. If start is not given (i.e., time
|
||||
have the format seconds.nanoseconds. If start is not given (i.e. time
|
||||
string is ',x.y') then analysis starts at the beginning of the file. If
|
||||
stop time is not given (i.e, time string is 'x.y,') then analysis goes
|
||||
to end of file.
|
||||
stop time is not given (i.e. time string is 'x.y,') then analysis goes
|
||||
to end of file. Multiple ranges can be separated by spaces, which
|
||||
requires the argument to be quoted e.g. --time "1234.567,1234.789 1235,"
|
||||
|
||||
Also support time percent with multipe time range. Time string is
|
||||
Also support time percent with multiple time ranges. Time string is
|
||||
'a%/n,b%/m,...' or 'a%-b%,c%-%d,...'.
|
||||
|
||||
For example:
|
||||
|
@ -200,6 +200,13 @@ use --per-socket in addition to -a. (system-wide). The output includes the
|
||||
socket number and the number of online processors on that socket. This is
|
||||
useful to gauge the amount of aggregation.
|
||||
|
||||
--per-die::
|
||||
Aggregate counts per processor die for system-wide mode measurements. This
|
||||
is a useful mode to detect imbalance between dies. To enable this mode,
|
||||
use --per-die in addition to -a. (system-wide). The output includes the
|
||||
die number and the number of online processors on that die. This is
|
||||
useful to gauge the amount of aggregation.
|
||||
|
||||
--per-core::
|
||||
Aggregate counts per physical processor for system-wide mode measurements. This
|
||||
is a useful mode to detect imbalance between physical cores. To enable this mode,
|
||||
@ -239,6 +246,9 @@ Input file name.
|
||||
--per-socket::
|
||||
Aggregate counts per processor socket for system-wide mode measurements.
|
||||
|
||||
--per-die::
|
||||
Aggregate counts per processor die for system-wide mode measurements.
|
||||
|
||||
--per-core::
|
||||
Aggregate counts per physical processor for system-wide mode measurements.
|
||||
|
||||
|
@ -262,6 +262,11 @@ Default is to monitor all CPUS.
|
||||
The number of threads to run when synthesizing events for existing processes.
|
||||
By default, the number of threads equals to the number of online CPUs.
|
||||
|
||||
--namespaces::
|
||||
Record events of type PERF_RECORD_NAMESPACES and display it with the
|
||||
'cgroup_id' sort key.
|
||||
|
||||
|
||||
INTERACTIVE PROMPTING KEYS
|
||||
--------------------------
|
||||
|
||||
|
@ -151,25 +151,45 @@ struct {
|
||||
|
||||
HEADER_CPU_TOPOLOGY = 13,
|
||||
|
||||
String lists defining the core and CPU threads topology.
|
||||
The string lists are followed by a variable length array
|
||||
which contains core_id and socket_id of each cpu.
|
||||
The number of entries can be determined by the size of the
|
||||
section minus the sizes of both string lists.
|
||||
|
||||
struct {
|
||||
/*
|
||||
* First revision of HEADER_CPU_TOPOLOGY
|
||||
*
|
||||
* See 'struct perf_header_string_list' definition earlier
|
||||
* in this file.
|
||||
*/
|
||||
|
||||
struct perf_header_string_list cores; /* Variable length */
|
||||
struct perf_header_string_list threads; /* Variable length */
|
||||
|
||||
/*
|
||||
* Second revision of HEADER_CPU_TOPOLOGY, older tools
|
||||
* will not consider what comes next
|
||||
*/
|
||||
|
||||
struct {
|
||||
uint32_t core_id;
|
||||
uint32_t socket_id;
|
||||
} cpus[nr]; /* Variable length records */
|
||||
/* 'nr' comes from previously processed HEADER_NRCPUS's nr_cpu_avail */
|
||||
|
||||
/*
|
||||
* Third revision of HEADER_CPU_TOPOLOGY, older tools
|
||||
* will not consider what comes next
|
||||
*/
|
||||
|
||||
struct perf_header_string_list dies; /* Variable length */
|
||||
uint32_t die_id[nr_cpus_avail]; /* from previously processed HEADER_NR_CPUS, VLA */
|
||||
};
|
||||
|
||||
Example:
|
||||
sibling cores : 0-3
|
||||
sibling sockets : 0-8
|
||||
sibling dies : 0-3
|
||||
sibling dies : 4-7
|
||||
sibling threads : 0-1
|
||||
sibling threads : 2-3
|
||||
sibling threads : 4-5
|
||||
sibling threads : 6-7
|
||||
|
||||
HEADER_NUMA_TOPOLOGY = 14,
|
||||
|
||||
@ -272,6 +292,69 @@ struct {
|
||||
|
||||
Two uint64_t for the time of first sample and the time of last sample.
|
||||
|
||||
HEADER_SAMPLE_TOPOLOGY = 22,
|
||||
|
||||
Physical memory map and its node assignments.
|
||||
|
||||
The format of data in MEM_TOPOLOGY is as follows:
|
||||
|
||||
0 - version | for future changes
|
||||
8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
|
||||
16 - count | number of nodes
|
||||
|
||||
For each node we store map of physical indexes:
|
||||
|
||||
32 - node id | node index
|
||||
40 - size | size of bitmap
|
||||
48 - bitmap | bitmap of memory indexes that belongs to node
|
||||
| /sys/devices/system/node/node<NODE>/memory<INDEX>
|
||||
|
||||
The MEM_TOPOLOGY can be displayed with following command:
|
||||
|
||||
$ perf report --header-only -I
|
||||
...
|
||||
# memory nodes (nr 1, block size 0x8000000):
|
||||
# 0 [7G]: 0-23,32-69
|
||||
|
||||
HEADER_CLOCKID = 23,
|
||||
|
||||
One uint64_t for the clockid frequency, specified, for instance, via 'perf
|
||||
record -k' (see clock_gettime()), to enable timestamps derived metrics
|
||||
conversion into wall clock time on the reporting stage.
|
||||
|
||||
HEADER_DIR_FORMAT = 24,
|
||||
|
||||
The data files layout is described by HEADER_DIR_FORMAT feature. Currently it
|
||||
holds only version number (1):
|
||||
|
||||
uint64_t version;
|
||||
|
||||
The current version holds only version value (1) means that data files:
|
||||
|
||||
- Follow the 'data.*' name format.
|
||||
|
||||
- Contain raw events data in standard perf format as read from kernel (and need
|
||||
to be sorted)
|
||||
|
||||
Future versions are expected to describe different data files layout according
|
||||
to special needs.
|
||||
|
||||
HEADER_BPF_PROG_INFO = 25,
|
||||
|
||||
struct bpf_prog_info_linear, which contains detailed information about
|
||||
a BPF program, including type, id, tag, jited/xlated instructions, etc.
|
||||
|
||||
HEADER_BPF_BTF = 26,
|
||||
|
||||
Contains BPF Type Format (BTF). For more information about BTF, please
|
||||
refer to Documentation/bpf/btf.rst.
|
||||
|
||||
struct {
|
||||
u32 id;
|
||||
u32 data_size;
|
||||
char data[];
|
||||
};
|
||||
|
||||
HEADER_COMPRESSED = 27,
|
||||
|
||||
struct {
|
||||
|
@ -38,6 +38,6 @@ To report cacheline events from previous recording: perf c2c report
|
||||
To browse sample contexts use perf report --sample 10 and select in context menu
|
||||
To separate samples by time use perf report --sort time,overhead,sym
|
||||
To set sample time separation other than 100ms with --sort time use --time-quantum
|
||||
Add -I to perf report to sample register values visible in perf report context.
|
||||
Add -I to perf record to sample register values, which will be visible in perf report sample context.
|
||||
To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
|
||||
To show context switches in perf report sample context add --switch-events to perf record.
|
||||
|
@ -7,6 +7,8 @@ tools/lib/traceevent
|
||||
tools/lib/api
|
||||
tools/lib/bpf
|
||||
tools/lib/subcmd
|
||||
tools/lib/argv_split.c
|
||||
tools/lib/ctype.c
|
||||
tools/lib/hweight.c
|
||||
tools/lib/rbtree.c
|
||||
tools/lib/string.c
|
||||
|
@ -332,6 +332,10 @@ ifeq ($(feature-get_current_dir_name), 1)
|
||||
CFLAGS += -DHAVE_GET_CURRENT_DIR_NAME
|
||||
endif
|
||||
|
||||
ifeq ($(feature-gettid), 1)
|
||||
CFLAGS += -DHAVE_GETTID
|
||||
endif
|
||||
|
||||
ifdef NO_LIBELF
|
||||
NO_DWARF := 1
|
||||
NO_DEMANGLE := 1
|
||||
@ -413,6 +417,9 @@ ifdef CORESIGHT
|
||||
$(call feature_check,libopencsd)
|
||||
ifeq ($(feature-libopencsd), 1)
|
||||
CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
|
||||
ifeq ($(feature-reallocarray), 0)
|
||||
CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
|
||||
endif
|
||||
LDFLAGS += $(LIBOPENCSD_LDFLAGS)
|
||||
EXTLIBS += $(OPENCSDLIBS)
|
||||
$(call detected,CONFIG_LIBOPENCSD)
|
||||
@ -637,11 +644,15 @@ endif
|
||||
|
||||
ifndef NO_SLANG
|
||||
ifneq ($(feature-libslang), 1)
|
||||
msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev);
|
||||
NO_SLANG := 1
|
||||
else
|
||||
ifneq ($(feature-libslang-include-subdir), 1)
|
||||
msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev);
|
||||
NO_SLANG := 1
|
||||
else
|
||||
CFLAGS += -DHAVE_SLANG_INCLUDE_SUBDIR
|
||||
endif
|
||||
endif
|
||||
ifndef NO_SLANG
|
||||
# Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
|
||||
CFLAGS += -I/usr/include/slang
|
||||
CFLAGS += -DHAVE_SLANG_SUPPORT
|
||||
EXTLIBS += -lslang
|
||||
$(call detected,CONFIG_SLANG)
|
||||
|
@ -420,6 +420,24 @@ fadvise_advice_tbl := $(srctree)/tools/perf/trace/beauty/fadvise.sh
|
||||
$(fadvise_advice_array): $(linux_uapi_dir)/in.h $(fadvise_advice_tbl)
|
||||
$(Q)$(SHELL) '$(fadvise_advice_tbl)' $(linux_uapi_dir) > $@
|
||||
|
||||
fsmount_arrays := $(beauty_outdir)/fsmount_arrays.c
|
||||
fsmount_tbls := $(srctree)/tools/perf/trace/beauty/fsmount.sh
|
||||
|
||||
$(fsmount_arrays): $(linux_uapi_dir)/fs.h $(fsmount_tbls)
|
||||
$(Q)$(SHELL) '$(fsmount_tbls)' $(linux_uapi_dir) > $@
|
||||
|
||||
fspick_arrays := $(beauty_outdir)/fspick_arrays.c
|
||||
fspick_tbls := $(srctree)/tools/perf/trace/beauty/fspick.sh
|
||||
|
||||
$(fspick_arrays): $(linux_uapi_dir)/fs.h $(fspick_tbls)
|
||||
$(Q)$(SHELL) '$(fspick_tbls)' $(linux_uapi_dir) > $@
|
||||
|
||||
fsconfig_arrays := $(beauty_outdir)/fsconfig_arrays.c
|
||||
fsconfig_tbls := $(srctree)/tools/perf/trace/beauty/fsconfig.sh
|
||||
|
||||
$(fsconfig_arrays): $(linux_uapi_dir)/fs.h $(fsconfig_tbls)
|
||||
$(Q)$(SHELL) '$(fsconfig_tbls)' $(linux_uapi_dir) > $@
|
||||
|
||||
pkey_alloc_access_rights_array := $(beauty_outdir)/pkey_alloc_access_rights_array.c
|
||||
asm_generic_hdr_dir := $(srctree)/tools/include/uapi/asm-generic/
|
||||
pkey_alloc_access_rights_tbl := $(srctree)/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
|
||||
@ -494,6 +512,12 @@ mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
|
||||
$(mount_flags_array): $(linux_uapi_dir)/fs.h $(mount_flags_tbl)
|
||||
$(Q)$(SHELL) '$(mount_flags_tbl)' $(linux_uapi_dir) > $@
|
||||
|
||||
move_mount_flags_array := $(beauty_outdir)/move_mount_flags_array.c
|
||||
move_mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/move_mount_flags.sh
|
||||
|
||||
$(move_mount_flags_array): $(linux_uapi_dir)/fs.h $(move_mount_flags_tbl)
|
||||
$(Q)$(SHELL) '$(move_mount_flags_tbl)' $(linux_uapi_dir) > $@
|
||||
|
||||
prctl_option_array := $(beauty_outdir)/prctl_option_array.c
|
||||
prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/
|
||||
prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
|
||||
@ -526,6 +550,12 @@ arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
|
||||
$(arch_errno_name_array): $(arch_errno_tbl)
|
||||
$(Q)$(SHELL) '$(arch_errno_tbl)' $(CC) $(arch_errno_hdr_dir) > $@
|
||||
|
||||
sync_file_range_arrays := $(beauty_outdir)/sync_file_range_arrays.c
|
||||
sync_file_range_tbls := $(srctree)/tools/perf/trace/beauty/sync_file_range.sh
|
||||
|
||||
$(sync_file_range_arrays): $(linux_uapi_dir)/fs.h $(sync_file_range_tbls)
|
||||
$(Q)$(SHELL) '$(sync_file_range_tbls)' $(linux_uapi_dir) > $@
|
||||
|
||||
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
|
||||
|
||||
# Create python binding output directory if not already present
|
||||
@ -629,6 +659,9 @@ build-dir = $(if $(__build-dir),$(__build-dir),.)
|
||||
|
||||
prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \
|
||||
$(fadvise_advice_array) \
|
||||
$(fsconfig_arrays) \
|
||||
$(fsmount_arrays) \
|
||||
$(fspick_arrays) \
|
||||
$(pkey_alloc_access_rights_array) \
|
||||
$(sndrv_pcm_ioctl_array) \
|
||||
$(sndrv_ctl_ioctl_array) \
|
||||
@ -639,12 +672,14 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
|
||||
$(madvise_behavior_array) \
|
||||
$(mmap_flags_array) \
|
||||
$(mount_flags_array) \
|
||||
$(move_mount_flags_array) \
|
||||
$(perf_ioctl_array) \
|
||||
$(prctl_option_array) \
|
||||
$(usbdevfs_ioctl_array) \
|
||||
$(x86_arch_prctl_code_array) \
|
||||
$(rename_flags_array) \
|
||||
$(arch_errno_name_array)
|
||||
$(arch_errno_name_array) \
|
||||
$(sync_file_range_arrays)
|
||||
|
||||
$(OUTPUT)%.o: %.c prepare FORCE
|
||||
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
|
||||
@ -923,9 +958,13 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
|
||||
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
|
||||
$(OUTPUT)pmu-events/pmu-events.c \
|
||||
$(OUTPUT)$(fadvise_advice_array) \
|
||||
$(OUTPUT)$(fsconfig_arrays) \
|
||||
$(OUTPUT)$(fsmount_arrays) \
|
||||
$(OUTPUT)$(fspick_arrays) \
|
||||
$(OUTPUT)$(madvise_behavior_array) \
|
||||
$(OUTPUT)$(mmap_flags_array) \
|
||||
$(OUTPUT)$(mount_flags_array) \
|
||||
$(OUTPUT)$(move_mount_flags_array) \
|
||||
$(OUTPUT)$(drm_ioctl_array) \
|
||||
$(OUTPUT)$(pkey_alloc_access_rights_array) \
|
||||
$(OUTPUT)$(sndrv_ctl_ioctl_array) \
|
||||
@ -939,7 +978,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
|
||||
$(OUTPUT)$(usbdevfs_ioctl_array) \
|
||||
$(OUTPUT)$(x86_arch_prctl_code_array) \
|
||||
$(OUTPUT)$(rename_flags_array) \
|
||||
$(OUTPUT)$(arch_errno_name_array)
|
||||
$(OUTPUT)$(arch_errno_name_array) \
|
||||
$(OUTPUT)$(sync_file_range_arrays)
|
||||
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
|
||||
|
||||
#
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "../../util/pmu.h"
|
||||
#include "../../util/thread_map.h"
|
||||
#include "../../util/cs-etm.h"
|
||||
#include "../../util/util.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdlib.h>
|
||||
@ -31,12 +32,158 @@ struct cs_etm_recording {
|
||||
struct auxtrace_record itr;
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
struct perf_evlist *evlist;
|
||||
int wrapped_cnt;
|
||||
bool *wrapped;
|
||||
bool snapshot_mode;
|
||||
size_t snapshot_size;
|
||||
};
|
||||
|
||||
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
|
||||
[CS_ETM_ETMCCER] = "mgmt/etmccer",
|
||||
[CS_ETM_ETMIDR] = "mgmt/etmidr",
|
||||
};
|
||||
|
||||
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
|
||||
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
|
||||
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
|
||||
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
|
||||
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
|
||||
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
|
||||
};
|
||||
|
||||
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
|
||||
|
||||
static int cs_etm_set_context_id(struct auxtrace_record *itr,
|
||||
struct perf_evsel *evsel, int cpu)
|
||||
{
|
||||
struct cs_etm_recording *ptr;
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
char path[PATH_MAX];
|
||||
int err = -EINVAL;
|
||||
u32 val;
|
||||
|
||||
ptr = container_of(itr, struct cs_etm_recording, itr);
|
||||
cs_etm_pmu = ptr->cs_etm_pmu;
|
||||
|
||||
if (!cs_etm_is_etmv4(itr, cpu))
|
||||
goto out;
|
||||
|
||||
/* Get a handle on TRCIRD2 */
|
||||
snprintf(path, PATH_MAX, "cpu%d/%s",
|
||||
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
|
||||
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
|
||||
|
||||
/* There was a problem reading the file, bailing out */
|
||||
if (err != 1) {
|
||||
pr_err("%s: can't read file %s\n",
|
||||
CORESIGHT_ETM_PMU_NAME, path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing
|
||||
* is supported:
|
||||
* 0b00000 Context ID tracing is not supported.
|
||||
* 0b00100 Maximum of 32-bit Context ID size.
|
||||
* All other values are reserved.
|
||||
*/
|
||||
val = BMVAL(val, 5, 9);
|
||||
if (!val || val != 0x4) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* All good, let the kernel know */
|
||||
evsel->attr.config |= (1 << ETM_OPT_CTXTID);
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cs_etm_set_timestamp(struct auxtrace_record *itr,
|
||||
struct perf_evsel *evsel, int cpu)
|
||||
{
|
||||
struct cs_etm_recording *ptr;
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
char path[PATH_MAX];
|
||||
int err = -EINVAL;
|
||||
u32 val;
|
||||
|
||||
ptr = container_of(itr, struct cs_etm_recording, itr);
|
||||
cs_etm_pmu = ptr->cs_etm_pmu;
|
||||
|
||||
if (!cs_etm_is_etmv4(itr, cpu))
|
||||
goto out;
|
||||
|
||||
/* Get a handle on TRCIRD0 */
|
||||
snprintf(path, PATH_MAX, "cpu%d/%s",
|
||||
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
|
||||
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
|
||||
|
||||
/* There was a problem reading the file, bailing out */
|
||||
if (err != 1) {
|
||||
pr_err("%s: can't read file %s\n",
|
||||
CORESIGHT_ETM_PMU_NAME, path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping
|
||||
* is supported:
|
||||
* 0b00000 Global timestamping is not implemented
|
||||
* 0b00110 Implementation supports a maximum timestamp of 48bits.
|
||||
* 0b01000 Implementation supports a maximum timestamp of 64bits.
|
||||
*/
|
||||
val &= GENMASK(28, 24);
|
||||
if (!val) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* All good, let the kernel know */
|
||||
evsel->attr.config |= (1 << ETM_OPT_TS);
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cs_etm_set_option(struct auxtrace_record *itr,
|
||||
struct perf_evsel *evsel, u32 option)
|
||||
{
|
||||
int i, err = -EINVAL;
|
||||
struct cpu_map *event_cpus = evsel->evlist->cpus;
|
||||
struct cpu_map *online_cpus = cpu_map__new(NULL);
|
||||
|
||||
/* Set option of each CPU we have */
|
||||
for (i = 0; i < cpu__max_cpu(); i++) {
|
||||
if (!cpu_map__has(event_cpus, i) ||
|
||||
!cpu_map__has(online_cpus, i))
|
||||
continue;
|
||||
|
||||
if (option & ETM_OPT_CTXTID) {
|
||||
err = cs_etm_set_context_id(itr, evsel, i);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
if (option & ETM_OPT_TS) {
|
||||
err = cs_etm_set_timestamp(itr, evsel, i);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS))
|
||||
/* Nothing else is currently supported */
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
out:
|
||||
cpu_map__put(online_cpus);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
|
||||
struct record_opts *opts,
|
||||
const char *str)
|
||||
@ -105,12 +252,16 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
|
||||
struct perf_evsel *evsel, *cs_etm_evsel = NULL;
|
||||
const struct cpu_map *cpus = evlist->cpus;
|
||||
struct cpu_map *cpus = evlist->cpus;
|
||||
bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
|
||||
int err = 0;
|
||||
|
||||
ptr->evlist = evlist;
|
||||
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
|
||||
|
||||
if (perf_can_record_switch_events())
|
||||
opts->record_switch_events = true;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == cs_etm_pmu->type) {
|
||||
if (cs_etm_evsel) {
|
||||
@ -241,19 +392,25 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
/*
|
||||
* In the case of per-cpu mmaps, we need the CPU on the
|
||||
* AUX event.
|
||||
* AUX event. We also need the contextID in order to be notified
|
||||
* when a context switch happened.
|
||||
*/
|
||||
if (!cpu_map__empty(cpus))
|
||||
if (!cpu_map__empty(cpus)) {
|
||||
perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
|
||||
|
||||
err = cs_etm_set_option(itr, cs_etm_evsel,
|
||||
ETM_OPT_CTXTID | ETM_OPT_TS);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Add dummy event to keep tracking */
|
||||
if (opts->full_auxtrace) {
|
||||
struct perf_evsel *tracking_evsel;
|
||||
int err;
|
||||
|
||||
err = parse_events(evlist, "dummy:u", NULL);
|
||||
if (err)
|
||||
return err;
|
||||
goto out;
|
||||
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
@ -266,7 +423,8 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
perf_evsel__set_sample_bit(tracking_evsel, TIME);
|
||||
}
|
||||
|
||||
return 0;
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static u64 cs_etm_get_config(struct auxtrace_record *itr)
|
||||
@ -314,6 +472,8 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
|
||||
config_opts = cs_etm_get_config(itr);
|
||||
if (config_opts & BIT(ETM_OPT_CYCACC))
|
||||
config |= BIT(ETM4_CFG_BIT_CYCACC);
|
||||
if (config_opts & BIT(ETM_OPT_CTXTID))
|
||||
config |= BIT(ETM4_CFG_BIT_CTXTID);
|
||||
if (config_opts & BIT(ETM_OPT_TS))
|
||||
config |= BIT(ETM4_CFG_BIT_TS);
|
||||
if (config_opts & BIT(ETM_OPT_RETSTK))
|
||||
@ -363,19 +523,6 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
|
||||
(etmv3 * CS_ETMV3_PRIV_SIZE));
|
||||
}
|
||||
|
||||
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
|
||||
[CS_ETM_ETMCCER] = "mgmt/etmccer",
|
||||
[CS_ETM_ETMIDR] = "mgmt/etmidr",
|
||||
};
|
||||
|
||||
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
|
||||
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
|
||||
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
|
||||
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
|
||||
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
|
||||
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
|
||||
};
|
||||
|
||||
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
|
||||
{
|
||||
bool ret = false;
|
||||
@ -536,16 +683,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
|
||||
static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
|
||||
{
|
||||
bool *wrapped;
|
||||
int cnt = ptr->wrapped_cnt;
|
||||
|
||||
/* Make @ptr->wrapped as big as @idx */
|
||||
while (cnt <= idx)
|
||||
cnt++;
|
||||
|
||||
/*
|
||||
* Free'ed in cs_etm_recording_free(). Using realloc() to avoid
|
||||
* cross compilation problems where the host's system supports
|
||||
* reallocarray() but not the target.
|
||||
*/
|
||||
wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
|
||||
if (!wrapped)
|
||||
return -ENOMEM;
|
||||
|
||||
wrapped[cnt - 1] = false;
|
||||
ptr->wrapped_cnt = cnt;
|
||||
ptr->wrapped = wrapped;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
|
||||
size_t buffer_size, u64 head)
|
||||
{
|
||||
u64 i, watermark;
|
||||
u64 *buf = (u64 *)buffer;
|
||||
size_t buf_size = buffer_size;
|
||||
|
||||
/*
|
||||
* We want to look the very last 512 byte (chosen arbitrarily) in
|
||||
* the ring buffer.
|
||||
*/
|
||||
watermark = buf_size - 512;
|
||||
|
||||
/*
|
||||
* @head is continuously increasing - if its value is equal or greater
|
||||
* than the size of the ring buffer, it has wrapped around.
|
||||
*/
|
||||
if (head >= buffer_size)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* The value of @head is somewhere within the size of the ring buffer.
|
||||
* This can be that there hasn't been enough data to fill the ring
|
||||
* buffer yet or the trace time was so long that @head has numerically
|
||||
* wrapped around. To find we need to check if we have data at the very
|
||||
* end of the ring buffer. We can reliably do this because mmap'ed
|
||||
* pages are zeroed out and there is a fresh mapping with every new
|
||||
* session.
|
||||
*/
|
||||
|
||||
/* @head is less than 512 byte from the end of the ring buffer */
|
||||
if (head > watermark)
|
||||
watermark = head;
|
||||
|
||||
/*
|
||||
* Speed things up by using 64 bit transactions (see "u64 *buf" above)
|
||||
*/
|
||||
watermark >>= 3;
|
||||
buf_size >>= 3;
|
||||
|
||||
/*
|
||||
* If we find trace data at the end of the ring buffer, @head has
|
||||
* been there and has numerically wrapped around at least once.
|
||||
*/
|
||||
for (i = watermark; i < buf_size; i++)
|
||||
if (buf[i])
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data __maybe_unused,
				unsigned char *data,
				u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around.  If it hasn't only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros.  But as soon as *head has
	 * wrapped around the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	*old = *head;
	*head += mm->len;
	/* No wrap has occurred, we can just use *head and *old. */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}
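A quick numeric illustration of the adjustment above (made-up sizes, not from the patch): with an AUX mmap of mm->len = 4 MiB that has wrapped and *head sitting at 6 MiB, the *head >= mm->len branch sets *old = 6 MiB - 4 MiB = 2 MiB, so the snapshot covers exactly one full ring worth of data ending at *head; when no wrap was detected, only the data between the previous *old and the current *head is written out, which keeps perf.data free of padding zeros.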
@@ -586,6 +848,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	zfree(&ptr->wrapped);
	free(ptr);
}
@@ -1,2 +1,2 @@
perf-y += util/
perf-$(CONFIG_DWARF_UNWIND) += tests/
perf-y += tests/

@@ -1,4 +1,4 @@
perf-y += regs_load.o
perf-y += dwarf-unwind.o
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o

perf-y += arch-tests.o
tools/perf/arch/csky/annotate/instructions.c (new file, 48 lines)
@@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/compiler.h>

static struct ins_ops *csky__associate_ins_ops(struct arch *arch,
					       const char *name)
{
	struct ins_ops *ops = NULL;

	/* catch all kinds of jumps */
	if (!strcmp(name, "bt") ||
	    !strcmp(name, "bf") ||
	    !strcmp(name, "bez") ||
	    !strcmp(name, "bnez") ||
	    !strcmp(name, "bnezad") ||
	    !strcmp(name, "bhsz") ||
	    !strcmp(name, "bhz") ||
	    !strcmp(name, "blsz") ||
	    !strcmp(name, "blz") ||
	    !strcmp(name, "br") ||
	    !strcmp(name, "jmpi") ||
	    !strcmp(name, "jmp"))
		ops = &jump_ops;

	/* catch function call */
	if (!strcmp(name, "bsr") ||
	    !strcmp(name, "jsri") ||
	    !strcmp(name, "jsr"))
		ops = &call_ops;

	/* catch function return */
	if (!strcmp(name, "rts"))
		ops = &ret_ops;

	if (ops)
		arch__associate_ins_ops(arch, name, ops);
	return ops;
}

static int csky__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
	arch->initialized = true;
	arch->objdump.comment_char = '/';
	arch->associate_instruction_ops = csky__associate_ins_ops;

	return 0;
}
@ -11,7 +11,7 @@
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#include "../../util/header.h"
|
||||
#include "../../util/util.h"
|
||||
|
@ -9,6 +9,7 @@ struct test;
|
||||
int test__rdpmc(struct test *test __maybe_unused, int subtest);
|
||||
int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
|
||||
int test__insn_x86(struct test *test __maybe_unused, int subtest);
|
||||
int test__intel_pt_pkt_decoder(struct test *test, int subtest);
|
||||
int test__bp_modify(struct test *test, int subtest);
|
||||
|
||||
#ifdef HAVE_DWARF_UNWIND_SUPPORT
|
||||
|
@ -4,5 +4,5 @@ perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
|
||||
perf-y += arch-tests.o
|
||||
perf-y += rdpmc.o
|
||||
perf-y += perf-time-to-tsc.o
|
||||
perf-$(CONFIG_AUXTRACE) += insn-x86.o
|
||||
perf-$(CONFIG_AUXTRACE) += insn-x86.o intel-pt-pkt-decoder-test.o
|
||||
perf-$(CONFIG_X86_64) += bp-modify.o
|
||||
|
@ -23,6 +23,10 @@ struct test arch_tests[] = {
|
||||
.desc = "x86 instruction decoder - new instructions",
|
||||
.func = test__insn_x86,
|
||||
},
|
||||
{
|
||||
.desc = "Intel PT packet decoder",
|
||||
.func = test__intel_pt_pkt_decoder,
|
||||
},
|
||||
#endif
|
||||
#if defined(__x86_64__)
|
||||
{
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "arch-tests.h"
|
||||
#include "util.h"
|
||||
|
||||
#include <signal.h>
|
||||
#include <sys/mman.h>
|
||||
|
tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c (new file, 304 lines)
@@ -0,0 +1,304 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
|
||||
|
||||
#include "debug.h"
|
||||
#include "tests/tests.h"
|
||||
#include "arch-tests.h"
|
||||
|
||||
/**
|
||||
* struct test_data - Test data.
|
||||
* @len: number of bytes to decode
|
||||
* @bytes: bytes to decode
|
||||
* @ctx: packet context to decode
|
||||
* @packet: expected packet
|
||||
* @new_ctx: expected new packet context
|
||||
* @ctx_unchanged: the packet context must not change
|
||||
*/
|
||||
struct test_data {
|
||||
int len;
|
||||
u8 bytes[INTEL_PT_PKT_MAX_SZ];
|
||||
enum intel_pt_pkt_ctx ctx;
|
||||
struct intel_pt_pkt packet;
|
||||
enum intel_pt_pkt_ctx new_ctx;
|
||||
int ctx_unchanged;
|
||||
} data[] = {
|
||||
/* Padding Packet */
|
||||
{1, {0}, 0, {INTEL_PT_PAD, 0, 0}, 0, 1 },
|
||||
/* Short Taken/Not Taken Packet */
|
||||
{1, {4}, 0, {INTEL_PT_TNT, 1, 0}, 0, 0 },
|
||||
{1, {6}, 0, {INTEL_PT_TNT, 1, 0x20ULL << 58}, 0, 0 },
|
||||
{1, {0x80}, 0, {INTEL_PT_TNT, 6, 0}, 0, 0 },
|
||||
{1, {0xfe}, 0, {INTEL_PT_TNT, 6, 0x3fULL << 58}, 0, 0 },
|
||||
/* Long Taken/Not Taken Packet */
|
||||
{8, {0x02, 0xa3, 2}, 0, {INTEL_PT_TNT, 1, 0xa302ULL << 47}, 0, 0 },
|
||||
{8, {0x02, 0xa3, 3}, 0, {INTEL_PT_TNT, 1, 0x1a302ULL << 47}, 0, 0 },
|
||||
{8, {0x02, 0xa3, 0, 0, 0, 0, 0, 0x80}, 0, {INTEL_PT_TNT, 47, 0xa302ULL << 1}, 0, 0 },
|
||||
{8, {0x02, 0xa3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, 0, {INTEL_PT_TNT, 47, 0xffffffffffffa302ULL << 1}, 0, 0 },
|
||||
/* Target IP Packet */
|
||||
{1, {0x0d}, 0, {INTEL_PT_TIP, 0, 0}, 0, 0 },
|
||||
{3, {0x2d, 1, 2}, 0, {INTEL_PT_TIP, 1, 0x201}, 0, 0 },
|
||||
{5, {0x4d, 1, 2, 3, 4}, 0, {INTEL_PT_TIP, 2, 0x4030201}, 0, 0 },
|
||||
{7, {0x6d, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_TIP, 3, 0x60504030201}, 0, 0 },
|
||||
{7, {0x8d, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_TIP, 4, 0x60504030201}, 0, 0 },
|
||||
{9, {0xcd, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_TIP, 6, 0x807060504030201}, 0, 0 },
|
||||
/* Packet Generation Enable */
|
||||
{1, {0x11}, 0, {INTEL_PT_TIP_PGE, 0, 0}, 0, 0 },
|
||||
{3, {0x31, 1, 2}, 0, {INTEL_PT_TIP_PGE, 1, 0x201}, 0, 0 },
|
||||
{5, {0x51, 1, 2, 3, 4}, 0, {INTEL_PT_TIP_PGE, 2, 0x4030201}, 0, 0 },
|
||||
{7, {0x71, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_TIP_PGE, 3, 0x60504030201}, 0, 0 },
|
||||
{7, {0x91, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_TIP_PGE, 4, 0x60504030201}, 0, 0 },
|
||||
{9, {0xd1, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_TIP_PGE, 6, 0x807060504030201}, 0, 0 },
|
||||
/* Packet Generation Disable */
|
||||
{1, {0x01}, 0, {INTEL_PT_TIP_PGD, 0, 0}, 0, 0 },
|
||||
{3, {0x21, 1, 2}, 0, {INTEL_PT_TIP_PGD, 1, 0x201}, 0, 0 },
|
||||
{5, {0x41, 1, 2, 3, 4}, 0, {INTEL_PT_TIP_PGD, 2, 0x4030201}, 0, 0 },
|
||||
{7, {0x61, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_TIP_PGD, 3, 0x60504030201}, 0, 0 },
|
||||
{7, {0x81, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_TIP_PGD, 4, 0x60504030201}, 0, 0 },
|
||||
{9, {0xc1, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_TIP_PGD, 6, 0x807060504030201}, 0, 0 },
|
||||
/* Flow Update Packet */
|
||||
{1, {0x1d}, 0, {INTEL_PT_FUP, 0, 0}, 0, 0 },
|
||||
{3, {0x3d, 1, 2}, 0, {INTEL_PT_FUP, 1, 0x201}, 0, 0 },
|
||||
{5, {0x5d, 1, 2, 3, 4}, 0, {INTEL_PT_FUP, 2, 0x4030201}, 0, 0 },
|
||||
{7, {0x7d, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_FUP, 3, 0x60504030201}, 0, 0 },
|
||||
{7, {0x9d, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_FUP, 4, 0x60504030201}, 0, 0 },
|
||||
{9, {0xdd, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_FUP, 6, 0x807060504030201}, 0, 0 },
|
||||
/* Paging Information Packet */
|
||||
{8, {0x02, 0x43, 2, 4, 6, 8, 10, 12}, 0, {INTEL_PT_PIP, 0, 0x60504030201}, 0, 0 },
|
||||
{8, {0x02, 0x43, 3, 4, 6, 8, 10, 12}, 0, {INTEL_PT_PIP, 0, 0x60504030201 | (1ULL << 63)}, 0, 0 },
|
||||
/* Mode Exec Packet */
|
||||
{2, {0x99, 0x00}, 0, {INTEL_PT_MODE_EXEC, 0, 16}, 0, 0 },
|
||||
{2, {0x99, 0x01}, 0, {INTEL_PT_MODE_EXEC, 0, 64}, 0, 0 },
|
||||
{2, {0x99, 0x02}, 0, {INTEL_PT_MODE_EXEC, 0, 32}, 0, 0 },
|
||||
/* Mode TSX Packet */
|
||||
{2, {0x99, 0x20}, 0, {INTEL_PT_MODE_TSX, 0, 0}, 0, 0 },
|
||||
{2, {0x99, 0x21}, 0, {INTEL_PT_MODE_TSX, 0, 1}, 0, 0 },
|
||||
{2, {0x99, 0x22}, 0, {INTEL_PT_MODE_TSX, 0, 2}, 0, 0 },
|
||||
/* Trace Stop Packet */
|
||||
{2, {0x02, 0x83}, 0, {INTEL_PT_TRACESTOP, 0, 0}, 0, 0 },
|
||||
/* Core:Bus Ratio Packet */
|
||||
{4, {0x02, 0x03, 0x12, 0}, 0, {INTEL_PT_CBR, 0, 0x12}, 0, 1 },
|
||||
/* Timestamp Counter Packet */
|
||||
{8, {0x19, 1, 2, 3, 4, 5, 6, 7}, 0, {INTEL_PT_TSC, 0, 0x7060504030201}, 0, 1 },
|
||||
/* Mini Time Counter Packet */
|
||||
{2, {0x59, 0x12}, 0, {INTEL_PT_MTC, 0, 0x12}, 0, 1 },
|
||||
/* TSC / MTC Alignment Packet */
|
||||
{7, {0x02, 0x73}, 0, {INTEL_PT_TMA, 0, 0}, 0, 1 },
|
||||
{7, {0x02, 0x73, 1, 2}, 0, {INTEL_PT_TMA, 0, 0x201}, 0, 1 },
|
||||
{7, {0x02, 0x73, 0, 0, 0, 0xff, 1}, 0, {INTEL_PT_TMA, 0x1ff, 0}, 0, 1 },
|
||||
{7, {0x02, 0x73, 0x80, 0xc0, 0, 0xff, 1}, 0, {INTEL_PT_TMA, 0x1ff, 0xc080}, 0, 1 },
|
||||
/* Cycle Count Packet */
|
||||
{1, {0x03}, 0, {INTEL_PT_CYC, 0, 0}, 0, 1 },
|
||||
{1, {0x0b}, 0, {INTEL_PT_CYC, 0, 1}, 0, 1 },
|
||||
{1, {0xfb}, 0, {INTEL_PT_CYC, 0, 0x1f}, 0, 1 },
|
||||
{2, {0x07, 2}, 0, {INTEL_PT_CYC, 0, 0x20}, 0, 1 },
|
||||
{2, {0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0xfff}, 0, 1 },
|
||||
{3, {0x07, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x1000}, 0, 1 },
|
||||
{3, {0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0x7ffff}, 0, 1 },
|
||||
{4, {0x07, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x80000}, 0, 1 },
|
||||
{4, {0xff, 0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0x3ffffff}, 0, 1 },
|
||||
{5, {0x07, 1, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x4000000}, 0, 1 },
|
||||
{5, {0xff, 0xff, 0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0x1ffffffff}, 0, 1 },
|
||||
{6, {0x07, 1, 1, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x200000000}, 0, 1 },
|
||||
{6, {0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0xffffffffff}, 0, 1 },
|
||||
{7, {0x07, 1, 1, 1, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x10000000000}, 0, 1 },
|
||||
{7, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0x7fffffffffff}, 0, 1 },
|
||||
{8, {0x07, 1, 1, 1, 1, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x800000000000}, 0, 1 },
|
||||
{8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0x3fffffffffffff}, 0, 1 },
|
||||
{9, {0x07, 1, 1, 1, 1, 1, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x40000000000000}, 0, 1 },
|
||||
{9, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}, 0, {INTEL_PT_CYC, 0, 0x1fffffffffffffff}, 0, 1 },
|
||||
{10, {0x07, 1, 1, 1, 1, 1, 1, 1, 1, 2}, 0, {INTEL_PT_CYC, 0, 0x2000000000000000}, 0, 1 },
|
||||
{10, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe}, 0, {INTEL_PT_CYC, 0, 0xffffffffffffffff}, 0, 1 },
|
||||
/* Virtual-Machine Control Structure Packet */
|
||||
{7, {0x02, 0xc8, 1, 2, 3, 4, 5}, 0, {INTEL_PT_VMCS, 5, 0x504030201}, 0, 0 },
|
||||
/* Overflow Packet */
|
||||
{2, {0x02, 0xf3}, 0, {INTEL_PT_OVF, 0, 0}, 0, 0 },
|
||||
{2, {0x02, 0xf3}, INTEL_PT_BLK_4_CTX, {INTEL_PT_OVF, 0, 0}, 0, 0 },
|
||||
{2, {0x02, 0xf3}, INTEL_PT_BLK_8_CTX, {INTEL_PT_OVF, 0, 0}, 0, 0 },
|
||||
	/* Packet Stream Boundary */
|
||||
{16, {0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82}, 0, {INTEL_PT_PSB, 0, 0}, 0, 0 },
|
||||
{16, {0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82}, INTEL_PT_BLK_4_CTX, {INTEL_PT_PSB, 0, 0}, 0, 0 },
|
||||
{16, {0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82}, INTEL_PT_BLK_8_CTX, {INTEL_PT_PSB, 0, 0}, 0, 0 },
|
||||
/* PSB End Packet */
|
||||
{2, {0x02, 0x23}, 0, {INTEL_PT_PSBEND, 0, 0}, 0, 0 },
|
||||
/* Maintenance Packet */
|
||||
{11, {0x02, 0xc3, 0x88, 1, 2, 3, 4, 5, 6, 7}, 0, {INTEL_PT_MNT, 0, 0x7060504030201}, 0, 1 },
|
||||
/* Write Data to PT Packet */
|
||||
{6, {0x02, 0x12, 1, 2, 3, 4}, 0, {INTEL_PT_PTWRITE, 0, 0x4030201}, 0, 0 },
|
||||
{10, {0x02, 0x32, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_PTWRITE, 1, 0x807060504030201}, 0, 0 },
|
||||
{6, {0x02, 0x92, 1, 2, 3, 4}, 0, {INTEL_PT_PTWRITE_IP, 0, 0x4030201}, 0, 0 },
|
||||
{10, {0x02, 0xb2, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_PTWRITE_IP, 1, 0x807060504030201}, 0, 0 },
|
||||
/* Execution Stop Packet */
|
||||
{2, {0x02, 0x62}, 0, {INTEL_PT_EXSTOP, 0, 0}, 0, 1 },
|
||||
{2, {0x02, 0xe2}, 0, {INTEL_PT_EXSTOP_IP, 0, 0}, 0, 1 },
|
||||
/* Monitor Wait Packet */
|
||||
{10, {0x02, 0xc2}, 0, {INTEL_PT_MWAIT, 0, 0}, 0, 0 },
|
||||
{10, {0x02, 0xc2, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_MWAIT, 0, 0x807060504030201}, 0, 0 },
|
||||
{10, {0x02, 0xc2, 0xff, 2, 3, 4, 7, 6, 7, 8}, 0, {INTEL_PT_MWAIT, 0, 0x8070607040302ff}, 0, 0 },
|
||||
/* Power Entry Packet */
|
||||
{4, {0x02, 0x22}, 0, {INTEL_PT_PWRE, 0, 0}, 0, 1 },
|
||||
{4, {0x02, 0x22, 1, 2}, 0, {INTEL_PT_PWRE, 0, 0x0201}, 0, 1 },
|
||||
{4, {0x02, 0x22, 0x80, 0x34}, 0, {INTEL_PT_PWRE, 0, 0x3480}, 0, 1 },
|
||||
{4, {0x02, 0x22, 0x00, 0x56}, 0, {INTEL_PT_PWRE, 0, 0x5600}, 0, 1 },
|
||||
/* Power Exit Packet */
|
||||
{7, {0x02, 0xa2}, 0, {INTEL_PT_PWRX, 0, 0}, 0, 1 },
|
||||
{7, {0x02, 0xa2, 1, 2, 3, 4, 5}, 0, {INTEL_PT_PWRX, 0, 0x504030201}, 0, 1 },
|
||||
{7, {0x02, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff}, 0, {INTEL_PT_PWRX, 0, 0xffffffffff}, 0, 1 },
|
||||
/* Block Begin Packet */
|
||||
{3, {0x02, 0x63, 0x00}, 0, {INTEL_PT_BBP, 0, 0}, INTEL_PT_BLK_8_CTX, 0 },
|
||||
{3, {0x02, 0x63, 0x80}, 0, {INTEL_PT_BBP, 1, 0}, INTEL_PT_BLK_4_CTX, 0 },
|
||||
{3, {0x02, 0x63, 0x1f}, 0, {INTEL_PT_BBP, 0, 0x1f}, INTEL_PT_BLK_8_CTX, 0 },
|
||||
{3, {0x02, 0x63, 0x9f}, 0, {INTEL_PT_BBP, 1, 0x1f}, INTEL_PT_BLK_4_CTX, 0 },
|
||||
/* 4-byte Block Item Packet */
|
||||
{5, {0x04}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BIP, 0, 0}, INTEL_PT_BLK_4_CTX, 0 },
|
||||
{5, {0xfc}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BIP, 0x1f, 0}, INTEL_PT_BLK_4_CTX, 0 },
|
||||
{5, {0x04, 1, 2, 3, 4}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BIP, 0, 0x04030201}, INTEL_PT_BLK_4_CTX, 0 },
|
||||
{5, {0xfc, 1, 2, 3, 4}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BIP, 0x1f, 0x04030201}, INTEL_PT_BLK_4_CTX, 0 },
|
||||
/* 8-byte Block Item Packet */
|
||||
{9, {0x04}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BIP, 0, 0}, INTEL_PT_BLK_8_CTX, 0 },
|
||||
{9, {0xfc}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BIP, 0x1f, 0}, INTEL_PT_BLK_8_CTX, 0 },
|
||||
{9, {0x04, 1, 2, 3, 4, 5, 6, 7, 8}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BIP, 0, 0x0807060504030201}, INTEL_PT_BLK_8_CTX, 0 },
|
||||
{9, {0xfc, 1, 2, 3, 4, 5, 6, 7, 8}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BIP, 0x1f, 0x0807060504030201}, INTEL_PT_BLK_8_CTX, 0 },
|
||||
/* Block End Packet */
|
||||
{2, {0x02, 0x33}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BEP, 0, 0}, 0, 0 },
|
||||
{2, {0x02, 0xb3}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BEP_IP, 0, 0}, 0, 0 },
|
||||
{2, {0x02, 0x33}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BEP, 0, 0}, 0, 0 },
|
||||
{2, {0x02, 0xb3}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BEP_IP, 0, 0}, 0, 0 },
|
||||
/* Terminator */
|
||||
{0, {0}, 0, {0, 0, 0}, 0, 0 },
|
||||
};
|
||||
|
||||
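The CYC rows above exercise the packet's variable-length cycle count: bits 7:3 of the first byte hold the low five bits, bit 2 marks that extension bytes follow, and each extension byte adds seven more bits with bit 0 as the continue flag. A hypothetical helper (not part of this test file) that reproduces the expected payloads, e.g. {0x07, 2} -> 0x20 and {0xff, 0xfe} -> 0xfff:

#include <stdint.h>

/* Hypothetical, for illustration only: extract the cycle count of a CYC
 * packet the same way the table entries above expect it to be decoded. */
static uint64_t cyc_payload_sketch(const uint8_t *buf, int len)
{
	uint64_t payload = buf[0] >> 3;		/* bits 7:3 of byte 0 */
	int i, shift = 5;

	if (!(buf[0] & 0x04))			/* bit 2: no extension bytes */
		return payload;

	for (i = 1; i < len; i++) {
		payload |= (uint64_t)(buf[i] >> 1) << shift;
		shift += 7;
		if (!(buf[i] & 0x01))		/* bit 0 clear: last byte */
			break;
	}
	return payload;
}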
static int dump_packet(struct intel_pt_pkt *packet, u8 *bytes, int len)
|
||||
{
|
||||
char desc[INTEL_PT_PKT_DESC_MAX];
|
||||
int ret, i;
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
pr_debug(" %02x", bytes[i]);
|
||||
for (; i < INTEL_PT_PKT_MAX_SZ; i++)
|
||||
pr_debug(" ");
|
||||
pr_debug(" ");
|
||||
ret = intel_pt_pkt_desc(packet, desc, INTEL_PT_PKT_DESC_MAX);
|
||||
if (ret < 0) {
|
||||
pr_debug("intel_pt_pkt_desc failed!\n");
|
||||
return TEST_FAIL;
|
||||
}
|
||||
pr_debug("%s\n", desc);
|
||||
|
||||
return TEST_OK;
|
||||
}
|
||||
|
||||
static void decoding_failed(struct test_data *d)
|
||||
{
|
||||
pr_debug("Decoding failed!\n");
|
||||
pr_debug("Decoding: ");
|
||||
dump_packet(&d->packet, d->bytes, d->len);
|
||||
}
|
||||
|
||||
static int fail(struct test_data *d, struct intel_pt_pkt *packet, int len,
|
||||
enum intel_pt_pkt_ctx new_ctx)
|
||||
{
|
||||
decoding_failed(d);
|
||||
|
||||
if (len != d->len)
|
||||
pr_debug("Expected length: %d Decoded length %d\n",
|
||||
d->len, len);
|
||||
|
||||
if (packet->type != d->packet.type)
|
||||
pr_debug("Expected type: %d Decoded type %d\n",
|
||||
d->packet.type, packet->type);
|
||||
|
||||
if (packet->count != d->packet.count)
|
||||
pr_debug("Expected count: %d Decoded count %d\n",
|
||||
d->packet.count, packet->count);
|
||||
|
||||
if (packet->payload != d->packet.payload)
|
||||
pr_debug("Expected payload: 0x%llx Decoded payload 0x%llx\n",
|
||||
(unsigned long long)d->packet.payload,
|
||||
(unsigned long long)packet->payload);
|
||||
|
||||
if (new_ctx != d->new_ctx)
|
||||
pr_debug("Expected packet context: %d Decoded packet context %d\n",
|
||||
d->new_ctx, new_ctx);
|
||||
|
||||
return TEST_FAIL;
|
||||
}
|
||||
|
||||
static int test_ctx_unchanged(struct test_data *d, struct intel_pt_pkt *packet,
|
||||
enum intel_pt_pkt_ctx ctx)
|
||||
{
|
||||
enum intel_pt_pkt_ctx old_ctx = ctx;
|
||||
|
||||
intel_pt_upd_pkt_ctx(packet, &ctx);
|
||||
|
||||
if (ctx != old_ctx) {
|
||||
decoding_failed(d);
|
||||
pr_debug("Packet context changed!\n");
|
||||
return TEST_FAIL;
|
||||
}
|
||||
|
||||
return TEST_OK;
|
||||
}
|
||||
|
||||
static int test_one(struct test_data *d)
|
||||
{
|
||||
struct intel_pt_pkt packet;
|
||||
enum intel_pt_pkt_ctx ctx = d->ctx;
|
||||
int ret;
|
||||
|
||||
memset(&packet, 0xff, sizeof(packet));
|
||||
|
||||
/* Decode a packet */
|
||||
ret = intel_pt_get_packet(d->bytes, d->len, &packet, &ctx);
|
||||
if (ret < 0 || ret > INTEL_PT_PKT_MAX_SZ) {
|
||||
decoding_failed(d);
|
||||
pr_debug("intel_pt_get_packet returned %d\n", ret);
|
||||
return TEST_FAIL;
|
||||
}
|
||||
|
||||
/* Some packets must always leave the packet context unchanged */
|
||||
if (d->ctx_unchanged) {
|
||||
int err;
|
||||
|
||||
err = test_ctx_unchanged(d, &packet, INTEL_PT_NO_CTX);
|
||||
if (err)
|
||||
return err;
|
||||
err = test_ctx_unchanged(d, &packet, INTEL_PT_BLK_4_CTX);
|
||||
if (err)
|
||||
return err;
|
||||
err = test_ctx_unchanged(d, &packet, INTEL_PT_BLK_8_CTX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Compare to the expected values */
|
||||
if (ret != d->len || packet.type != d->packet.type ||
|
||||
packet.count != d->packet.count ||
|
||||
packet.payload != d->packet.payload || ctx != d->new_ctx)
|
||||
return fail(d, &packet, ret, ctx);
|
||||
|
||||
pr_debug("Decoded ok:");
|
||||
ret = dump_packet(&d->packet, d->bytes, d->len);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This test feeds byte sequences to the Intel PT packet decoder and checks the
|
||||
* results. Changes to the packet context are also checked.
|
||||
*/
|
||||
int test__intel_pt_pkt_decoder(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
struct test_data *d = data;
|
||||
int ret;
|
||||
|
||||
for (d = data; d->len; d++) {
|
||||
ret = test_one(d);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return TEST_OK;
|
||||
}
|
@ -25,6 +25,7 @@
|
||||
#include "../../util/auxtrace.h"
|
||||
#include "../../util/tsc.h"
|
||||
#include "../../util/intel-pt.h"
|
||||
#include "../../util/util.h"
|
||||
|
||||
#define KiB(x) ((x) * 1024)
|
||||
#define MiB(x) ((x) * 1024 * 1024)
|
||||
|
@ -3,10 +3,11 @@
|
||||
#include <linux/string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "../../util/util.h"
|
||||
#include "../../util/machine.h"
|
||||
#include "../../util/map.h"
|
||||
#include "../../util/symbol.h"
|
||||
#include "../../util/sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#include <symbol/kallsyms.h>
|
||||
|
||||
|
@ -20,6 +20,8 @@
|
||||
#include "util/data.h"
|
||||
#include "util/config.h"
|
||||
#include "util/time-utils.h"
|
||||
#include "util/annotate.h"
|
||||
#include "util/map.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <inttypes.h>
|
||||
@ -32,6 +34,7 @@ struct perf_diff {
|
||||
struct perf_time_interval *ptime_range;
|
||||
int range_size;
|
||||
int range_num;
|
||||
bool has_br_stack;
|
||||
};
|
||||
|
||||
/* Diff command specific HPP columns. */
|
||||
@ -44,6 +47,7 @@ enum {
|
||||
PERF_HPP_DIFF__WEIGHTED_DIFF,
|
||||
PERF_HPP_DIFF__FORMULA,
|
||||
PERF_HPP_DIFF__DELTA_ABS,
|
||||
PERF_HPP_DIFF__CYCLES,
|
||||
|
||||
PERF_HPP_DIFF__MAX_INDEX
|
||||
};
|
||||
@ -86,11 +90,14 @@ static s64 compute_wdiff_w2;
|
||||
static const char *cpu_list;
|
||||
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
|
||||
|
||||
static struct addr_location dummy_al;
|
||||
|
||||
enum {
|
||||
COMPUTE_DELTA,
|
||||
COMPUTE_RATIO,
|
||||
COMPUTE_WEIGHTED_DIFF,
|
||||
COMPUTE_DELTA_ABS,
|
||||
COMPUTE_CYCLES,
|
||||
COMPUTE_MAX,
|
||||
};
|
||||
|
||||
@ -99,6 +106,7 @@ const char *compute_names[COMPUTE_MAX] = {
|
||||
[COMPUTE_DELTA_ABS] = "delta-abs",
|
||||
[COMPUTE_RATIO] = "ratio",
|
||||
[COMPUTE_WEIGHTED_DIFF] = "wdiff",
|
||||
[COMPUTE_CYCLES] = "cycles",
|
||||
};
|
||||
|
||||
static int compute = COMPUTE_DELTA_ABS;
|
||||
@ -108,6 +116,7 @@ static int compute_2_hpp[COMPUTE_MAX] = {
|
||||
[COMPUTE_DELTA_ABS] = PERF_HPP_DIFF__DELTA_ABS,
|
||||
[COMPUTE_RATIO] = PERF_HPP_DIFF__RATIO,
|
||||
[COMPUTE_WEIGHTED_DIFF] = PERF_HPP_DIFF__WEIGHTED_DIFF,
|
||||
[COMPUTE_CYCLES] = PERF_HPP_DIFF__CYCLES,
|
||||
};
|
||||
|
||||
#define MAX_COL_WIDTH 70
|
||||
@ -146,6 +155,10 @@ static struct header_column {
|
||||
[PERF_HPP_DIFF__FORMULA] = {
|
||||
.name = "Formula",
|
||||
.width = MAX_COL_WIDTH,
|
||||
},
|
||||
[PERF_HPP_DIFF__CYCLES] = {
|
||||
.name = "[Program Block Range] Cycles Diff",
|
||||
.width = 70,
|
||||
}
|
||||
};
|
||||
|
||||
@ -335,6 +348,31 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void *block_hist_zalloc(size_t size)
|
||||
{
|
||||
struct block_hist *bh;
|
||||
|
||||
bh = zalloc(size + sizeof(*bh));
|
||||
if (!bh)
|
||||
return NULL;
|
||||
|
||||
return &bh->he;
|
||||
}
|
||||
|
||||
static void block_hist_free(void *he)
|
||||
{
|
||||
struct block_hist *bh;
|
||||
|
||||
bh = container_of(he, struct block_hist, he);
|
||||
hists__delete_entries(&bh->block_hists);
|
||||
free(bh);
|
||||
}
|
||||
|
||||
struct hist_entry_ops block_hist_ops = {
|
||||
.new = block_hist_zalloc,
|
||||
.free = block_hist_free,
|
||||
};
|
||||
|
||||
static int diff__process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
@ -362,9 +400,22 @@ static int diff__process_sample_event(struct perf_tool *tool,
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample, true)) {
|
||||
pr_warning("problem incrementing symbol period, skipping event\n");
|
||||
goto out_put;
|
||||
if (compute != COMPUTE_CYCLES) {
|
||||
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample,
|
||||
true)) {
|
||||
pr_warning("problem incrementing symbol period, "
|
||||
"skipping event\n");
|
||||
goto out_put;
|
||||
}
|
||||
} else {
|
||||
if (!hists__add_entry_ops(hists, &block_hist_ops, &al, NULL,
|
||||
NULL, NULL, sample, true)) {
|
||||
pr_warning("problem incrementing symbol period, "
|
||||
"skipping event\n");
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
hist__account_cycles(sample->branch_stack, &al, sample, false);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -474,6 +525,203 @@ static void hists__baseline_only(struct hists *hists)
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t block_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
|
||||
struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
struct block_info *bi_l = left->block_info;
|
||||
struct block_info *bi_r = right->block_info;
|
||||
int cmp;
|
||||
|
||||
if (!bi_l->sym || !bi_r->sym) {
|
||||
if (!bi_l->sym && !bi_r->sym)
|
||||
return 0;
|
||||
else if (!bi_l->sym)
|
||||
return -1;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (bi_l->sym == bi_r->sym) {
|
||||
if (bi_l->start == bi_r->start) {
|
||||
if (bi_l->end == bi_r->end)
|
||||
return 0;
|
||||
else
|
||||
return (int64_t)(bi_r->end - bi_l->end);
|
||||
} else
|
||||
return (int64_t)(bi_r->start - bi_l->start);
|
||||
} else {
|
||||
cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
|
||||
return cmp;
|
||||
}
|
||||
|
||||
if (bi_l->sym->start != bi_r->sym->start)
|
||||
return (int64_t)(bi_r->sym->start - bi_l->sym->start);
|
||||
|
||||
return (int64_t)(bi_r->sym->end - bi_l->sym->end);
|
||||
}
|
||||
|
||||
static int64_t block_cycles_diff_cmp(struct hist_entry *left,
|
||||
struct hist_entry *right)
|
||||
{
|
||||
bool pairs_left = hist_entry__has_pairs(left);
|
||||
bool pairs_right = hist_entry__has_pairs(right);
|
||||
s64 l, r;
|
||||
|
||||
if (!pairs_left && !pairs_right)
|
||||
return 0;
|
||||
|
||||
l = labs(left->diff.cycles);
|
||||
r = labs(right->diff.cycles);
|
||||
return r - l;
|
||||
}
|
||||
|
||||
static int64_t block_sort(struct perf_hpp_fmt *fmt __maybe_unused,
|
||||
struct hist_entry *left, struct hist_entry *right)
|
||||
{
|
||||
return block_cycles_diff_cmp(right, left);
|
||||
}
|
||||
|
||||
static void init_block_hist(struct block_hist *bh)
|
||||
{
|
||||
__hists__init(&bh->block_hists, &bh->block_list);
|
||||
perf_hpp_list__init(&bh->block_list);
|
||||
|
||||
INIT_LIST_HEAD(&bh->block_fmt.list);
|
||||
INIT_LIST_HEAD(&bh->block_fmt.sort_list);
|
||||
bh->block_fmt.cmp = block_cmp;
|
||||
bh->block_fmt.sort = block_sort;
|
||||
perf_hpp_list__register_sort_field(&bh->block_list,
|
||||
&bh->block_fmt);
|
||||
bh->valid = true;
|
||||
}
|
||||
|
||||
static void init_block_info(struct block_info *bi, struct symbol *sym,
|
||||
struct cyc_hist *ch, int offset)
|
||||
{
|
||||
bi->sym = sym;
|
||||
bi->start = ch->start;
|
||||
bi->end = offset;
|
||||
bi->cycles = ch->cycles;
|
||||
bi->cycles_aggr = ch->cycles_aggr;
|
||||
bi->num = ch->num;
|
||||
bi->num_aggr = ch->num_aggr;
|
||||
}
|
||||
|
||||
static int process_block_per_sym(struct hist_entry *he)
|
||||
{
|
||||
struct annotation *notes;
|
||||
struct cyc_hist *ch;
|
||||
struct block_hist *bh;
|
||||
|
||||
if (!he->ms.map || !he->ms.sym)
|
||||
return 0;
|
||||
|
||||
notes = symbol__annotation(he->ms.sym);
|
||||
if (!notes || !notes->src || !notes->src->cycles_hist)
|
||||
return 0;
|
||||
|
||||
bh = container_of(he, struct block_hist, he);
|
||||
init_block_hist(bh);
|
||||
|
||||
ch = notes->src->cycles_hist;
|
||||
for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
|
||||
if (ch[i].num_aggr) {
|
||||
struct block_info *bi;
|
||||
struct hist_entry *he_block;
|
||||
|
||||
bi = block_info__new();
|
||||
if (!bi)
|
||||
return -1;
|
||||
|
||||
init_block_info(bi, he->ms.sym, &ch[i], i);
|
||||
he_block = hists__add_entry_block(&bh->block_hists,
|
||||
&dummy_al, bi);
|
||||
if (!he_block) {
|
||||
block_info__put(bi);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int block_pair_cmp(struct hist_entry *a, struct hist_entry *b)
|
||||
{
|
||||
struct block_info *bi_a = a->block_info;
|
||||
struct block_info *bi_b = b->block_info;
|
||||
int cmp;
|
||||
|
||||
if (!bi_a->sym || !bi_b->sym)
|
||||
return -1;
|
||||
|
||||
cmp = strcmp(bi_a->sym->name, bi_b->sym->name);
|
||||
|
||||
if ((!cmp) && (bi_a->start == bi_b->start) && (bi_a->end == bi_b->end))
|
||||
return 0;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static struct hist_entry *get_block_pair(struct hist_entry *he,
|
||||
struct hists *hists_pair)
|
||||
{
|
||||
struct rb_root_cached *root = hists_pair->entries_in;
|
||||
struct rb_node *next = rb_first_cached(root);
|
||||
int cmp;
|
||||
|
||||
while (next != NULL) {
|
||||
struct hist_entry *he_pair = rb_entry(next, struct hist_entry,
|
||||
rb_node_in);
|
||||
|
||||
next = rb_next(&he_pair->rb_node_in);
|
||||
|
||||
cmp = block_pair_cmp(he_pair, he);
|
||||
if (!cmp)
|
||||
return he_pair;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void compute_cycles_diff(struct hist_entry *he,
|
||||
struct hist_entry *pair)
|
||||
{
|
||||
pair->diff.computed = true;
|
||||
if (pair->block_info->num && he->block_info->num) {
|
||||
pair->diff.cycles =
|
||||
pair->block_info->cycles_aggr / pair->block_info->num_aggr -
|
||||
he->block_info->cycles_aggr / he->block_info->num_aggr;
|
||||
}
|
||||
}
|
||||
|
||||
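A short worked example for the computation above (made-up counts): if a basic block was hit 10 times for 1200 aggregated cycles in the baseline file (average 120) and 10 times for 900 cycles in the compared file (average 90), pair->diff.cycles becomes 90 - 120 = -30; block_cycles_diff_cmp() then orders blocks by the absolute value of that per-block average difference.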
static void block_hists_match(struct hists *hists_base,
|
||||
struct hists *hists_pair)
|
||||
{
|
||||
struct rb_root_cached *root = hists_base->entries_in;
|
||||
struct rb_node *next = rb_first_cached(root);
|
||||
|
||||
while (next != NULL) {
|
||||
struct hist_entry *he = rb_entry(next, struct hist_entry,
|
||||
rb_node_in);
|
||||
struct hist_entry *pair = get_block_pair(he, hists_pair);
|
||||
|
||||
next = rb_next(&he->rb_node_in);
|
||||
|
||||
if (pair) {
|
||||
hist_entry__add_pair(pair, he);
|
||||
compute_cycles_diff(he, pair);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
|
||||
{
|
||||
/* Skip the calculation of column length in output_resort */
|
||||
he->filtered = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hists__precompute(struct hists *hists)
|
||||
{
|
||||
struct rb_root_cached *root;
|
||||
@ -486,6 +734,7 @@ static void hists__precompute(struct hists *hists)
|
||||
|
||||
next = rb_first_cached(root);
|
||||
while (next != NULL) {
|
||||
struct block_hist *bh, *pair_bh;
|
||||
struct hist_entry *he, *pair;
|
||||
struct data__file *d;
|
||||
int i;
|
||||
@ -493,6 +742,9 @@ static void hists__precompute(struct hists *hists)
|
||||
he = rb_entry(next, struct hist_entry, rb_node_in);
|
||||
next = rb_next(&he->rb_node_in);
|
||||
|
||||
if (compute == COMPUTE_CYCLES)
|
||||
process_block_per_sym(he);
|
||||
|
||||
data__for_each_file_new(i, d) {
|
||||
pair = get_pair_data(he, d);
|
||||
if (!pair)
|
||||
@ -509,6 +761,19 @@ static void hists__precompute(struct hists *hists)
|
||||
case COMPUTE_WEIGHTED_DIFF:
|
||||
compute_wdiff(he, pair);
|
||||
break;
|
||||
case COMPUTE_CYCLES:
|
||||
process_block_per_sym(pair);
|
||||
bh = container_of(he, struct block_hist, he);
|
||||
pair_bh = container_of(pair, struct block_hist,
|
||||
he);
|
||||
|
||||
if (bh->valid && pair_bh->valid) {
|
||||
block_hists_match(&bh->block_hists,
|
||||
&pair_bh->block_hists);
|
||||
hists__output_resort_cb(&pair_bh->block_hists,
|
||||
NULL, filter_cb);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
BUG_ON(1);
|
||||
}
|
||||
@ -720,6 +985,9 @@ static void hists__process(struct hists *hists)
|
||||
hists__precompute(hists);
|
||||
hists__output_resort(hists, NULL);
|
||||
|
||||
if (compute == COMPUTE_CYCLES)
|
||||
symbol_conf.report_block = true;
|
||||
|
||||
hists__fprintf(hists, !quiet, 0, 0, 0, stdout,
|
||||
!symbol_conf.use_callchain);
|
||||
}
|
||||
@ -873,6 +1141,31 @@ static int parse_time_str(struct data__file *d, char *abstime_ostr,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int check_file_brstack(void)
|
||||
{
|
||||
struct data__file *d;
|
||||
bool has_br_stack;
|
||||
int i;
|
||||
|
||||
data__for_each_file(i, d) {
|
||||
d->session = perf_session__new(&d->data, false, &pdiff.tool);
|
||||
if (!d->session) {
|
||||
pr_err("Failed to open %s\n", d->data.path);
|
||||
return -1;
|
||||
}
|
||||
|
||||
has_br_stack = perf_header__has_feat(&d->session->header,
|
||||
HEADER_BRANCH_STACK);
|
||||
perf_session__delete(d->session);
|
||||
if (!has_br_stack)
|
||||
return 0;
|
||||
}
|
||||
|
||||
	/* Set it only when all files have branch stacks */
|
||||
pdiff.has_br_stack = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __cmd_diff(void)
|
||||
{
|
||||
struct data__file *d;
|
||||
@ -950,7 +1243,7 @@ static const struct option options[] = {
|
||||
OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
|
||||
"Show only items with match in baseline"),
|
||||
OPT_CALLBACK('c', "compute", &compute,
|
||||
"delta,delta-abs,ratio,wdiff:w1,w2 (default delta-abs)",
|
||||
"delta,delta-abs,ratio,wdiff:w1,w2 (default delta-abs),cycles",
|
||||
"Entries differential computation selection",
|
||||
setup_compute),
|
||||
OPT_BOOLEAN('p', "period", &show_period,
|
||||
@ -1028,6 +1321,49 @@ static int hpp__entry_baseline(struct hist_entry *he, char *buf, size_t size)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cycles_printf(struct hist_entry *he, struct hist_entry *pair,
|
||||
struct perf_hpp *hpp, int width)
|
||||
{
|
||||
struct block_hist *bh = container_of(he, struct block_hist, he);
|
||||
struct block_hist *bh_pair = container_of(pair, struct block_hist, he);
|
||||
struct hist_entry *block_he;
|
||||
struct block_info *bi;
|
||||
char buf[128];
|
||||
char *start_line, *end_line;
|
||||
|
||||
block_he = hists__get_entry(&bh_pair->block_hists, bh->block_idx);
|
||||
if (!block_he) {
|
||||
hpp->skip = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Avoid printing the warning "addr2line_init failed for ..."
|
||||
*/
|
||||
symbol_conf.disable_add2line_warn = true;
|
||||
|
||||
bi = block_he->block_info;
|
||||
|
||||
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
|
||||
he->ms.sym);
|
||||
|
||||
end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
|
||||
he->ms.sym);
|
||||
|
||||
if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
|
||||
scnprintf(buf, sizeof(buf), "[%s -> %s] %4ld",
|
||||
start_line, end_line, block_he->diff.cycles);
|
||||
} else {
|
||||
scnprintf(buf, sizeof(buf), "[%7lx -> %7lx] %4ld",
|
||||
bi->start, bi->end, block_he->diff.cycles);
|
||||
}
|
||||
|
||||
free_srcline(start_line);
|
||||
free_srcline(end_line);
|
||||
|
||||
return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
|
||||
}
|
||||
|
||||
static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
|
||||
struct perf_hpp *hpp, struct hist_entry *he,
|
||||
int comparison_method)
|
||||
@ -1039,8 +1375,17 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
|
||||
s64 wdiff;
|
||||
char pfmt[20] = " ";
|
||||
|
||||
if (!pair)
|
||||
if (!pair) {
|
||||
if (comparison_method == COMPUTE_CYCLES) {
|
||||
struct block_hist *bh;
|
||||
|
||||
bh = container_of(he, struct block_hist, he);
|
||||
if (bh->block_idx)
|
||||
hpp->skip = true;
|
||||
}
|
||||
|
||||
goto no_print;
|
||||
}
|
||||
|
||||
switch (comparison_method) {
|
||||
case COMPUTE_DELTA:
|
||||
@ -1075,6 +1420,8 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
|
||||
return color_snprintf(hpp->buf, hpp->size,
|
||||
get_percent_color(wdiff),
|
||||
pfmt, wdiff);
|
||||
case COMPUTE_CYCLES:
|
||||
return cycles_printf(he, pair, hpp, dfmt->header_width);
|
||||
default:
|
||||
BUG_ON(1);
|
||||
}
|
||||
@ -1104,6 +1451,12 @@ static int hpp__color_wdiff(struct perf_hpp_fmt *fmt,
|
||||
return __hpp__color_compare(fmt, hpp, he, COMPUTE_WEIGHTED_DIFF);
|
||||
}
|
||||
|
||||
static int hpp__color_cycles(struct perf_hpp_fmt *fmt,
|
||||
struct perf_hpp *hpp, struct hist_entry *he)
|
||||
{
|
||||
return __hpp__color_compare(fmt, hpp, he, COMPUTE_CYCLES);
|
||||
}
|
||||
|
||||
static void
|
||||
hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size)
|
||||
{
|
||||
@ -1305,6 +1658,10 @@ static void data__hpp_register(struct data__file *d, int idx)
|
||||
fmt->color = hpp__color_delta;
|
||||
fmt->sort = hist_entry__cmp_delta_abs;
|
||||
break;
|
||||
case PERF_HPP_DIFF__CYCLES:
|
||||
fmt->color = hpp__color_cycles;
|
||||
fmt->sort = hist_entry__cmp_nop;
|
||||
break;
|
||||
default:
|
||||
fmt->sort = hist_entry__cmp_nop;
|
||||
break;
|
||||
@ -1385,6 +1742,13 @@ static int ui_init(void)
|
||||
case COMPUTE_DELTA_ABS:
|
||||
fmt->sort = hist_entry__cmp_delta_abs_idx;
|
||||
break;
|
||||
case COMPUTE_CYCLES:
|
||||
			/*
			 * Must be set, since 'fmt->sort' is called without
			 * checking 'valid' during sorting.
			 */
|
||||
fmt->sort = hist_entry__cmp_nop;
|
||||
break;
|
||||
default:
|
||||
BUG_ON(1);
|
||||
}
|
||||
@ -1481,12 +1845,20 @@ int cmd_diff(int argc, const char **argv)
|
||||
if (quiet)
|
||||
perf_quiet_option();
|
||||
|
||||
symbol__annotation_init();
|
||||
|
||||
if (symbol__init(NULL) < 0)
|
||||
return -1;
|
||||
|
||||
if (data_init(argc, argv) < 0)
|
||||
return -1;
|
||||
|
||||
if (check_file_brstack() < 0)
|
||||
return -1;
|
||||
|
||||
if (compute == COMPUTE_CYCLES && !pdiff.has_br_stack)
|
||||
return -1;
|
||||
|
||||
if (ui_init() < 0)
|
||||
return -1;
|
||||
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "util/cpumap.h"
|
||||
|
||||
#include "util/debug.h"
|
||||
#include "util/string2.h"
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/rbtree.h>
|
||||
@ -30,7 +31,7 @@
|
||||
#include <locale.h>
|
||||
#include <regex.h>
|
||||
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
static int kmem_slab;
|
||||
static int kmem_page;
|
||||
|
@ -2191,6 +2191,10 @@ static struct option __record_options[] = {
|
||||
OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
|
||||
"Configure all used events to run in user space.",
|
||||
PARSE_OPT_EXCLUSIVE),
|
||||
OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
|
||||
"collect kernel callchains"),
|
||||
OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
|
||||
"collect user callchains"),
|
||||
OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
|
||||
"clang binary to use for compiling BPF scriptlets"),
|
||||
OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
|
||||
|
@ -47,7 +47,7 @@
|
||||
#include <errno.h>
|
||||
#include <inttypes.h>
|
||||
#include <regex.h>
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
#include <signal.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/stringify.h>
|
||||
@ -941,8 +941,7 @@ parse_time_quantum(const struct option *opt, const char *arg,
|
||||
pr_err("time quantum cannot be 0");
|
||||
return -1;
|
||||
}
|
||||
while (isspace(*end))
|
||||
end++;
|
||||
end = skip_spaces(end);
|
||||
if (*end == 0)
|
||||
return 0;
|
||||
if (!strcmp(end, "s")) {
|
||||
@ -1428,6 +1427,10 @@ repeat:
|
||||
&report.range_num);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
itrace_synth_opts__set_time_range(&itrace_synth_opts,
|
||||
report.ptime_range,
|
||||
report.range_num);
|
||||
}
|
||||
|
||||
if (session->tevent.pevent &&
|
||||
@ -1449,8 +1452,10 @@ repeat:
|
||||
ret = 0;
|
||||
|
||||
error:
|
||||
if (report.ptime_range)
|
||||
if (report.ptime_range) {
|
||||
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
|
||||
zfree(&report.ptime_range);
|
||||
}
|
||||
zstd_fini(&(session->zstd_data));
|
||||
perf_session__delete(session);
|
||||
return ret;
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "util/thread_map.h"
|
||||
#include "util/color.h"
|
||||
#include "util/stat.h"
|
||||
#include "util/string2.h"
|
||||
#include "util/callchain.h"
|
||||
#include "util/time-utils.h"
|
||||
|
||||
@ -36,7 +37,7 @@
|
||||
#include <api/fs/fs.h>
|
||||
#include <linux/time64.h>
|
||||
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#define PR_SET_NAME 15 /* Set process name */
|
||||
#define MAX_CPUS 4096
|
||||
|
@ -49,7 +49,7 @@
|
||||
#include <unistd.h>
|
||||
#include <subcmd/pager.h>
|
||||
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
static char const *script_name;
|
||||
static char const *generate_script_lang;
|
||||
@ -102,6 +102,7 @@ enum perf_output_field {
|
||||
PERF_OUTPUT_METRIC = 1U << 28,
|
||||
PERF_OUTPUT_MISC = 1U << 29,
|
||||
PERF_OUTPUT_SRCCODE = 1U << 30,
|
||||
PERF_OUTPUT_IPC = 1U << 31,
|
||||
};
|
||||
|
||||
struct output_option {
|
||||
@ -139,6 +140,7 @@ struct output_option {
|
||||
{.str = "metric", .field = PERF_OUTPUT_METRIC},
|
||||
{.str = "misc", .field = PERF_OUTPUT_MISC},
|
||||
{.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
|
||||
{.str = "ipc", .field = PERF_OUTPUT_IPC},
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -1268,6 +1270,20 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
|
||||
return printed;
|
||||
}
|
||||
|
||||
static int perf_sample__fprintf_ipc(struct perf_sample *sample,
|
||||
struct perf_event_attr *attr, FILE *fp)
|
||||
{
|
||||
unsigned int ipc;
|
||||
|
||||
if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
|
||||
return 0;
|
||||
|
||||
ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;
|
||||
|
||||
return fprintf(fp, " \t IPC: %u.%02u (%" PRIu64 "/%" PRIu64 ") ",
|
||||
ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
|
||||
}
|
||||
|
||||
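The new IPC field is computed in fixed point to keep floating point out of the print path: the ratio is scaled by 100 and emitted as quotient and remainder. With made-up counts of 250 instructions over 100 cycles, ipc = 250 and the field prints as "IPC: 2.50 (250/100)".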
static int perf_sample__fprintf_bts(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct thread *thread,
|
||||
@ -1312,6 +1328,8 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
|
||||
printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
|
||||
}
|
||||
|
||||
printed += perf_sample__fprintf_ipc(sample, attr, fp);
|
||||
|
||||
if (print_srcline_last)
|
||||
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
|
||||
|
||||
@ -1606,6 +1624,7 @@ struct perf_script {
|
||||
bool show_namespace_events;
|
||||
bool show_lost_events;
|
||||
bool show_round_events;
|
||||
bool show_bpf_events;
|
||||
bool allocated;
|
||||
bool per_event_dump;
|
||||
struct cpu_map *cpus;
|
||||
@ -1858,6 +1877,9 @@ static void process_event(struct perf_script *script,
|
||||
|
||||
if (PRINT_FIELD(PHYS_ADDR))
|
||||
fprintf(fp, "%16" PRIx64, sample->phys_addr);
|
||||
|
||||
perf_sample__fprintf_ipc(sample, attr, fp);
|
||||
|
||||
fprintf(fp, "\n");
|
||||
|
||||
if (PRINT_FIELD(SRCCODE)) {
|
||||
@ -2318,6 +2340,41 @@ process_finished_round_event(struct perf_tool *tool __maybe_unused,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
process_bpf_events(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
if (machine__process_ksymbol(machine, event, sample) < 0)
|
||||
return -1;
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
perf_event__fprintf(event, stdout);
|
||||
return 0;
|
||||
}
|
||||
|
||||
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
|
||||
if (thread == NULL) {
|
||||
pr_debug("problem processing MMAP event, skipping it.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!filter_cpu(sample)) {
|
||||
perf_sample__fprintf_start(sample, thread, evsel,
|
||||
event->header.type, stdout);
|
||||
perf_event__fprintf(event, stdout);
|
||||
}
|
||||
|
||||
thread__put(thread);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sig_handler(int sig __maybe_unused)
|
||||
{
|
||||
session_done = 1;
|
||||
@ -2420,6 +2477,10 @@ static int __cmd_script(struct perf_script *script)
|
||||
script->tool.ordered_events = false;
|
||||
script->tool.finished_round = process_finished_round_event;
|
||||
}
|
||||
if (script->show_bpf_events) {
|
||||
script->tool.ksymbol = process_bpf_events;
|
||||
script->tool.bpf_event = process_bpf_events;
|
||||
}
|
||||
|
||||
if (perf_script__setup_per_event_dump(script)) {
|
||||
pr_err("Couldn't create the per event dump files\n");
|
||||
@ -2819,7 +2880,7 @@ static int read_script_info(struct script_desc *desc, const char *filename)
|
||||
return -1;
|
||||
|
||||
while (fgets(line, sizeof(line), fp)) {
|
||||
p = ltrim(line);
|
||||
p = skip_spaces(line);
|
||||
if (strlen(p) == 0)
|
||||
continue;
|
||||
if (*p != '#')
|
||||
@ -2828,19 +2889,19 @@ static int read_script_info(struct script_desc *desc, const char *filename)
|
||||
if (strlen(p) && *p == '!')
|
||||
continue;
|
||||
|
||||
p = ltrim(p);
|
||||
p = skip_spaces(p);
|
||||
if (strlen(p) && p[strlen(p) - 1] == '\n')
|
||||
p[strlen(p) - 1] = '\0';
|
||||
|
||||
if (!strncmp(p, "description:", strlen("description:"))) {
|
||||
p += strlen("description:");
|
||||
desc->half_liner = strdup(ltrim(p));
|
||||
desc->half_liner = strdup(skip_spaces(p));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strncmp(p, "args:", strlen("args:"))) {
|
||||
p += strlen("args:");
|
||||
desc->args = strdup(ltrim(p));
|
||||
desc->args = strdup(skip_spaces(p));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -2947,7 +3008,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
|
||||
return -1;
|
||||
|
||||
while (fgets(line, sizeof(line), fp)) {
|
||||
p = ltrim(line);
|
||||
p = skip_spaces(line);
|
||||
if (*p == '#')
|
||||
continue;
|
||||
|
||||
@ -2957,7 +3018,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
|
||||
break;
|
||||
|
||||
p += 2;
|
||||
p = ltrim(p);
|
||||
p = skip_spaces(p);
|
||||
len = strcspn(p, " \t");
|
||||
if (!len)
|
||||
break;
|
||||
@ -3297,6 +3358,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
|
||||
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
|
||||
itrace_parse_synth_opts(opt, "cewp", 0);
|
||||
symbol_conf.nanosecs = true;
|
||||
symbol_conf.pad_output_len_dso = 50;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3392,7 +3454,7 @@ int cmd_script(int argc, const char **argv)
|
||||
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
|
||||
"addr,symoff,srcline,period,iregs,uregs,brstack,"
|
||||
"brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
|
||||
"callindent,insn,insnlen,synth,phys_addr,metric,misc",
|
||||
"callindent,insn,insnlen,synth,phys_addr,metric,misc,ipc",
|
||||
parse_output_fields),
|
||||
OPT_BOOLEAN('a', "all-cpus", &system_wide,
|
||||
"system-wide collection from all CPUs"),
|
||||
@ -3438,6 +3500,8 @@ int cmd_script(int argc, const char **argv)
|
||||
"Show lost events (if recorded)"),
|
||||
OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events,
|
||||
"Show round events (if recorded)"),
|
||||
OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events,
|
||||
"Show bpf related events (if recorded)"),
|
||||
OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
|
||||
"Dump trace output to files named by the monitored events"),
|
||||
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
|
||||
@ -3458,6 +3522,15 @@ int cmd_script(int argc, const char **argv)
|
||||
"Time span of interest (start,stop)"),
|
||||
OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
|
||||
"Show inline function"),
|
||||
OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
|
||||
"guest mount directory under which every guest os"
|
||||
" instance has a subdir"),
|
||||
OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
|
||||
"file", "file saving guest os vmlinux"),
|
||||
OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
|
||||
"file", "file saving guest os /proc/kallsyms"),
|
||||
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
|
||||
"file", "file saving guest os /proc/modules"),
|
||||
OPT_END()
|
||||
};
|
||||
const char * const script_subcommands[] = { "record", "report", NULL };
|
||||
@ -3477,6 +3550,16 @@ int cmd_script(int argc, const char **argv)
|
||||
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
|
||||
PARSE_OPT_STOP_AT_NON_OPTION);
|
||||
|
||||
if (symbol_conf.guestmount ||
|
||||
symbol_conf.default_guest_vmlinux_name ||
|
||||
symbol_conf.default_guest_kallsyms ||
|
||||
symbol_conf.default_guest_modules) {
|
||||
/*
|
||||
* Enable guest sample processing.
|
||||
*/
|
||||
perf_guest = true;
|
||||
}
|
||||
|
||||
data.path = input_name;
|
||||
data.force = symbol_conf.force;
|
||||
|
||||
@ -3765,6 +3848,10 @@ int cmd_script(int argc, const char **argv)
|
||||
&script.range_num);
|
||||
if (err < 0)
|
||||
goto out_delete;
|
||||
|
||||
itrace_synth_opts__set_time_range(&itrace_synth_opts,
|
||||
script.ptime_range,
|
||||
script.range_num);
|
||||
}
|
||||
|
||||
err = __cmd_script(&script);
|
||||
@ -3772,8 +3859,10 @@ int cmd_script(int argc, const char **argv)
|
||||
flush_scripting();
|
||||
|
||||
out_delete:
|
||||
if (script.ptime_range)
|
||||
if (script.ptime_range) {
|
||||
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
|
||||
zfree(&script.ptime_range);
|
||||
}
|
||||
|
||||
perf_evlist__free_stats(session->evlist);
|
||||
perf_session__delete(session);
|
||||
|
@ -82,7 +82,7 @@
|
||||
#include <sys/time.h>
|
||||
#include <sys/resource.h>
|
||||
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#define DEFAULT_SEPARATOR " "
|
||||
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
|
||||
@ -776,6 +776,8 @@ static struct option stat_options[] = {
|
||||
"stop workload and print counts after a timeout period in ms (>= 10ms)"),
|
||||
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
|
||||
"aggregate counts per processor socket", AGGR_SOCKET),
|
||||
OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
|
||||
"aggregate counts per processor die", AGGR_DIE),
|
||||
OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
|
||||
"aggregate counts per physical processor core", AGGR_CORE),
|
||||
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
|
||||
@ -800,6 +802,12 @@ static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
|
||||
return cpu_map__get_socket(map, cpu, NULL);
|
||||
}
|
||||
|
||||
static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int cpu)
|
||||
{
|
||||
return cpu_map__get_die(map, cpu, NULL);
|
||||
}
|
||||
|
||||
static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int cpu)
|
||||
{
|
||||
@ -840,6 +848,12 @@ static int perf_stat__get_socket_cached(struct perf_stat_config *config,
|
||||
return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
|
||||
}
|
||||
|
||||
static int perf_stat__get_die_cached(struct perf_stat_config *config,
|
||||
struct cpu_map *map, int idx)
|
||||
{
|
||||
return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
|
||||
}
|
||||
|
||||
static int perf_stat__get_core_cached(struct perf_stat_config *config,
|
||||
struct cpu_map *map, int idx)
|
||||
{
|
||||
@ -870,6 +884,13 @@ static int perf_stat_init_aggr_mode(void)
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_socket_cached;
|
||||
break;
|
||||
case AGGR_DIE:
|
||||
if (cpu_map__build_die_map(evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build die map");
|
||||
return -1;
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_die_cached;
|
||||
break;
|
||||
case AGGR_CORE:
|
||||
if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build core map");
|
||||
@ -935,21 +956,55 @@ static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
|
||||
return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
|
||||
}
|
||||
|
||||
static int perf_env__get_die(struct cpu_map *map, int idx, void *data)
|
||||
{
|
||||
struct perf_env *env = data;
|
||||
int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
|
||||
|
||||
if (cpu != -1) {
|
||||
/*
|
||||
* Encode socket in bit range 15:8
|
||||
* die_id is relative to socket,
|
||||
* we need a global id. So we combine
|
||||
* socket + die id
|
||||
*/
|
||||
if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
|
||||
return -1;
|
||||
|
||||
if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
|
||||
return -1;
|
||||
|
||||
die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff);
|
||||
}
|
||||
|
||||
return die_id;
|
||||
}
|
||||
|
||||
static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
|
||||
{
|
||||
struct perf_env *env = data;
|
||||
int core = -1, cpu = perf_env__get_cpu(env, map, idx);
|
||||
|
||||
if (cpu != -1) {
|
||||
int socket_id = env->cpu[cpu].socket_id;
|
||||
|
||||
/*
|
||||
* Encode socket in upper 16 bits
|
||||
* core_id is relative to socket, and
|
||||
* Encode socket in bit range 31:24
|
||||
* encode die id in bit range 23:16
|
||||
* core_id is relative to socket and die,
|
||||
* we need a global id. So we combine
|
||||
* socket + core id.
|
||||
* socket + die id + core id
|
||||
*/
|
||||
core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
|
||||
if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
|
||||
return -1;
|
||||
|
||||
if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
|
||||
return -1;
|
||||
|
||||
if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n"))
|
||||
return -1;
|
||||
|
||||
core = (env->cpu[cpu].socket_id << 24) |
|
||||
(env->cpu[cpu].die_id << 16) |
|
||||
(env->cpu[cpu].core_id & 0xffff);
|
||||
}
|
||||
|
||||
return core;
|
||||
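Both helpers above fold the topology levels into a single integer so that per-die and per-core aggregation can key on one id: the die id packs the socket into bits 15:8 and the die into bits 7:0, while the core id packs socket, die and core into bits 31:24, 23:16 and 15:0. A small standalone sketch of that packing with made-up ids (socket 1, die 2, core 3):

#include <stdio.h>

int main(void)
{
	unsigned int socket_id = 1, die_id = 2, core_id = 3;	/* made-up ids */

	/* layout used by perf_env__get_die(): socket 15:8, die 7:0 */
	unsigned int die = (socket_id << 8) | (die_id & 0xff);

	/* layout used by perf_env__get_core(): socket 31:24, die 23:16, core 15:0 */
	unsigned int core = (socket_id << 24) | (die_id << 16) |
			    (core_id & 0xffff);

	printf("die id 0x%x, core id 0x%x\n", die, core);	/* 0x102, 0x1020003 */
	return 0;
}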
@ -961,6 +1016,12 @@ static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus
|
||||
return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
|
||||
}
|
||||
|
||||
static int perf_env__build_die_map(struct perf_env *env, struct cpu_map *cpus,
|
||||
struct cpu_map **diep)
|
||||
{
|
||||
return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
|
||||
}
|
||||
|
||||
static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
|
||||
struct cpu_map **corep)
|
||||
{
|
||||
@ -972,6 +1033,11 @@ static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_un
|
||||
{
|
||||
return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
|
||||
}
|
||||
static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int idx)
|
||||
{
|
||||
return perf_env__get_die(map, idx, &perf_stat.session->header.env);
|
||||
}
|
||||
|
||||
static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int idx)
|
||||
@ -991,6 +1057,13 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_socket_file;
|
||||
break;
|
||||
case AGGR_DIE:
|
||||
if (perf_env__build_die_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build die map");
|
||||
return -1;
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_die_file;
|
||||
break;
|
||||
case AGGR_CORE:
|
||||
if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build core map");
|
||||
@ -1541,6 +1614,8 @@ static int __cmd_report(int argc, const char **argv)
|
||||
OPT_STRING('i', "input", &input_name, "file", "input file name"),
|
||||
OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
|
||||
"aggregate counts per processor socket", AGGR_SOCKET),
|
||||
OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
|
||||
"aggregate counts per processor die", AGGR_DIE),
|
||||
OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
|
||||
"aggregate counts per physical processor core", AGGR_CORE),
|
||||
OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
|
||||
|
@@ -40,6 +40,7 @@
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/string2.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
@@ -75,7 +76,7 @@
#include <linux/time64.h>
#include <linux/types.h>

#include "sane_ctype.h"
#include <linux/ctype.h>

static volatile int done;
static volatile int resize;
@@ -1207,11 +1208,14 @@ static int __cmd_top(struct perf_top *top)

	init_process_thread(top);

	if (opts->record_namespaces)
		top->tool.namespace_events = true;

	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
						&top->session->machines.host,
						&top->record_opts);
	if (ret < 0)
		pr_warning("Couldn't synthesize bpf events.\n");
		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->threads, false,
@@ -1499,6 +1503,8 @@ int cmd_top(int argc, const char **argv)
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
		     "number of thread to run event synthesize"),
	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
		    "Record namespaces events"),
	OPT_END()
	};
	struct perf_evlist *sb_evlist = NULL;
@ -64,7 +64,7 @@
|
||||
#include <fcntl.h>
|
||||
#include <sys/sysmacros.h>
|
||||
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#ifndef O_CLOEXEC
|
||||
# define O_CLOEXEC 02000000
|
||||
@ -402,6 +402,11 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
|
||||
|
||||
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
|
||||
|
||||
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
|
||||
{
|
||||
return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
|
||||
}
|
||||
|
||||
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
|
||||
{
|
||||
size_t printed;
|
||||
@ -481,6 +486,15 @@ static const char *bpf_cmd[] = {
|
||||
};
|
||||
static DEFINE_STRARRAY(bpf_cmd, "BPF_");
|
||||
|
||||
static const char *fsmount_flags[] = {
|
||||
[1] = "CLOEXEC",
|
||||
};
|
||||
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
|
||||
|
||||
#include "trace/beauty/generated/fsconfig_arrays.c"
|
||||
|
||||
static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
|
||||
|
||||
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
|
||||
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
|
||||
|
||||
@ -641,6 +655,10 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
|
||||
{ .scnprintf = SCA_STRARRAY, \
|
||||
.parm = &strarray__##array, }
|
||||
|
||||
#define STRARRAY_FLAGS(name, array) \
|
||||
{ .scnprintf = SCA_STRARRAY_FLAGS, \
|
||||
.parm = &strarray__##array, }
|
||||
|
||||
#include "trace/beauty/arch_errno_names.c"
|
||||
#include "trace/beauty/eventfd.c"
|
||||
#include "trace/beauty/futex_op.c"
|
||||
@ -712,6 +730,15 @@ static struct syscall_fmt {
|
||||
[2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
|
||||
{ .name = "flock",
|
||||
.arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
|
||||
{ .name = "fsconfig",
|
||||
.arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
|
||||
{ .name = "fsmount",
|
||||
.arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
|
||||
[2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
|
||||
{ .name = "fspick",
|
||||
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
|
||||
[1] = { .scnprintf = SCA_FILENAME, /* path */ },
|
||||
[2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
|
||||
{ .name = "fstat", .alias = "newfstat", },
|
||||
{ .name = "fstatat", .alias = "newfstatat", },
|
||||
{ .name = "futex",
|
||||
@ -774,6 +801,12 @@ static struct syscall_fmt {
|
||||
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
|
||||
[3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
|
||||
.mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
|
||||
{ .name = "move_mount",
|
||||
.arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
|
||||
[1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
|
||||
[2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
|
||||
[3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
|
||||
[4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
|
||||
{ .name = "mprotect",
|
||||
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
|
||||
[2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
|
||||
@ -878,6 +911,8 @@ static struct syscall_fmt {
|
||||
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
|
||||
{ .name = "symlinkat",
|
||||
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
|
||||
{ .name = "sync_file_range",
|
||||
.arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
|
||||
{ .name = "tgkill",
|
||||
.arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
|
||||
{ .name = "tkill",
|
||||
@ -936,8 +971,14 @@ struct syscall {
|
||||
struct syscall_arg_fmt *arg_fmt;
|
||||
};
|
||||
|
||||
/*
|
||||
* Must match what is in the BPF program:
|
||||
*
|
||||
* tools/perf/examples/bpf/augmented_raw_syscalls.c
|
||||
*/
|
||||
struct bpf_map_syscall_entry {
|
||||
bool enabled;
|
||||
u16 string_args_len[6];
|
||||
};
|
||||
|
||||
/*
|
||||
@ -1191,8 +1232,17 @@ static void thread__set_filename_pos(struct thread *thread, const char *bf,
|
||||
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
|
||||
{
|
||||
struct augmented_arg *augmented_arg = arg->augmented.args;
|
||||
size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
|
||||
/*
|
||||
* So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
|
||||
* we would have two strings, each prefixed by its size.
|
||||
*/
|
||||
int consumed = sizeof(*augmented_arg) + augmented_arg->size;
|
||||
|
||||
return scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
|
||||
arg->augmented.args = ((void *)arg->augmented.args) + consumed;
|
||||
arg->augmented.size -= consumed;
|
||||
|
||||
return printed;
|
||||
}
|
||||
|
||||
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
|
||||
@ -1380,10 +1430,11 @@ static int syscall__set_arg_fmts(struct syscall *sc)
|
||||
if (sc->fmt && sc->fmt->arg[idx].scnprintf)
|
||||
continue;
|
||||
|
||||
len = strlen(field->name);
|
||||
|
||||
if (strcmp(field->type, "const char *") == 0 &&
|
||||
(strcmp(field->name, "filename") == 0 ||
|
||||
strcmp(field->name, "path") == 0 ||
|
||||
strcmp(field->name, "pathname") == 0))
|
||||
((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
|
||||
strstr(field->name, "path") != NULL))
|
||||
sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
|
||||
else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
|
||||
sc->arg_fmt[idx].scnprintf = SCA_PTR;
|
||||
@ -1394,8 +1445,7 @@ static int syscall__set_arg_fmts(struct syscall *sc)
|
||||
else if ((strcmp(field->type, "int") == 0 ||
|
||||
strcmp(field->type, "unsigned int") == 0 ||
|
||||
strcmp(field->type, "long") == 0) &&
|
||||
(len = strlen(field->name)) >= 2 &&
|
||||
strcmp(field->name + len - 2, "fd") == 0) {
|
||||
len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
|
||||
/*
|
||||
* /sys/kernel/tracing/events/syscalls/sys_enter*
|
||||
* egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
|
||||
@ -1477,12 +1527,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
|
||||
|
||||
static int trace__validate_ev_qualifier(struct trace *trace)
|
||||
{
|
||||
int err = 0, i;
|
||||
size_t nr_allocated;
|
||||
int err = 0;
|
||||
bool printed_invalid_prefix = false;
|
||||
struct str_node *pos;
|
||||
size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
|
||||
|
||||
trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
|
||||
trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
|
||||
trace->ev_qualifier_ids.entries = malloc(nr_allocated *
|
||||
sizeof(trace->ev_qualifier_ids.entries[0]));
|
||||
|
||||
if (trace->ev_qualifier_ids.entries == NULL) {
|
||||
@ -1492,9 +1542,6 @@ static int trace__validate_ev_qualifier(struct trace *trace)
|
||||
goto out;
|
||||
}
|
||||
|
||||
nr_allocated = trace->ev_qualifier_ids.nr;
|
||||
i = 0;
|
||||
|
||||
strlist__for_each_entry(pos, trace->ev_qualifier) {
|
||||
const char *sc = pos->s;
|
||||
int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
|
||||
@ -1504,17 +1551,18 @@ static int trace__validate_ev_qualifier(struct trace *trace)
|
||||
if (id >= 0)
|
||||
goto matches;
|
||||
|
||||
if (err == 0) {
|
||||
fputs("Error:\tInvalid syscall ", trace->output);
|
||||
err = -EINVAL;
|
||||
if (!printed_invalid_prefix) {
|
||||
pr_debug("Skipping unknown syscalls: ");
|
||||
printed_invalid_prefix = true;
|
||||
} else {
|
||||
fputs(", ", trace->output);
|
||||
pr_debug(", ");
|
||||
}
|
||||
|
||||
fputs(sc, trace->output);
|
||||
pr_debug("%s", sc);
|
||||
continue;
|
||||
}
|
||||
matches:
|
||||
trace->ev_qualifier_ids.entries[i++] = id;
|
||||
trace->ev_qualifier_ids.entries[nr_used++] = id;
|
||||
if (match_next == -1)
|
||||
continue;
|
||||
|
||||
@ -1522,7 +1570,7 @@ matches:
|
||||
id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
|
||||
if (id < 0)
|
||||
break;
|
||||
if (nr_allocated == trace->ev_qualifier_ids.nr) {
|
||||
if (nr_allocated == nr_used) {
|
||||
void *entries;
|
||||
|
||||
nr_allocated += 8;
|
||||
@ -1535,20 +1583,19 @@ matches:
|
||||
}
|
||||
trace->ev_qualifier_ids.entries = entries;
|
||||
}
|
||||
trace->ev_qualifier_ids.nr++;
|
||||
trace->ev_qualifier_ids.entries[i++] = id;
|
||||
trace->ev_qualifier_ids.entries[nr_used++] = id;
|
||||
}
|
||||
}
|
||||
|
||||
if (err < 0) {
|
||||
fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
|
||||
"\nHint:\tand: 'man syscalls'\n", trace->output);
|
||||
out_free:
|
||||
zfree(&trace->ev_qualifier_ids.entries);
|
||||
trace->ev_qualifier_ids.nr = 0;
|
||||
}
|
||||
trace->ev_qualifier_ids.nr = nr_used;
|
||||
out:
|
||||
if (printed_invalid_prefix)
|
||||
pr_debug("\n");
|
||||
return err;
|
||||
out_free:
|
||||
zfree(&trace->ev_qualifier_ids.entries);
|
||||
trace->ev_qualifier_ids.nr = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2675,6 +2722,25 @@ out_enomem:
|
||||
}
|
||||
|
||||
#ifdef HAVE_LIBBPF_SUPPORT
|
||||
static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
|
||||
{
|
||||
struct syscall *sc = trace__syscall_info(trace, NULL, id);
|
||||
int arg = 0;
|
||||
|
||||
if (sc == NULL)
|
||||
goto out;
|
||||
|
||||
for (; arg < sc->nr_args; ++arg) {
|
||||
entry->string_args_len[arg] = 0;
|
||||
if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
|
||||
/* Should be set like strace -s strsize */
|
||||
entry->string_args_len[arg] = PATH_MAX;
|
||||
}
|
||||
}
|
||||
out:
|
||||
for (; arg < 6; ++arg)
|
||||
entry->string_args_len[arg] = 0;
|
||||
}
|
||||
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
|
||||
{
|
||||
int fd = bpf_map__fd(trace->syscalls.map);
|
||||
@ -2687,6 +2753,9 @@ static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
|
||||
for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
|
||||
int key = trace->ev_qualifier_ids.entries[i];
|
||||
|
||||
if (value.enabled)
|
||||
trace__init_bpf_map_syscall_args(trace, key, &value);
|
||||
|
||||
err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
|
||||
if (err)
|
||||
break;
|
||||
@ -2704,6 +2773,9 @@ static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
|
||||
int err = 0, key;
|
||||
|
||||
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
|
||||
if (enabled)
|
||||
trace__init_bpf_map_syscall_args(trace, key, &value);
|
||||
|
||||
err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
|
||||
if (err)
|
||||
break;
|
||||
@ -3627,7 +3699,12 @@ static int trace__config(const char *var, const char *value, void *arg)
|
||||
struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
|
||||
"event selector. use 'perf list' to list available events",
|
||||
parse_events_option);
|
||||
err = parse_events_option(&o, value, 0);
|
||||
/*
|
||||
* We can't propagate parse_event_option() return, as it is 1
|
||||
* for failure while perf_config() expects -1.
|
||||
*/
|
||||
if (parse_events_option(&o, value, 0))
|
||||
err = -1;
|
||||
} else if (!strcmp(var, "trace.show_timestamp")) {
|
||||
trace->show_tstamp = perf_config_bool(var, value);
|
||||
} else if (!strcmp(var, "trace.show_duration")) {
|
||||
|
@ -105,6 +105,8 @@ check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/ex
|
||||
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
|
||||
check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
|
||||
check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
|
||||
check include/linux/ctype.h '-I "isdigit("'
|
||||
check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
|
||||
|
||||
# diff non-symmetric files
|
||||
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
|
||||
|
@ -21,8 +21,14 @@
|
||||
/* bpf-output associated map */
|
||||
bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
|
||||
|
||||
/*
|
||||
* string_args_len: one per syscall arg, 0 means not a string or don't copy it,
|
||||
* PATH_MAX for copying everything, any other value to limit
|
||||
* it a la 'strace -s strsize'.
|
||||
*/
|
||||
struct syscall {
|
||||
bool enabled;
|
||||
u16 string_args_len[6];
|
||||
};
|
||||
|
||||
bpf_map(syscalls, ARRAY, int, struct syscall, 512);
|
||||
@ -41,83 +47,10 @@ struct syscall_exit_args {
|
||||
|
||||
struct augmented_filename {
|
||||
unsigned int size;
|
||||
int reserved;
|
||||
int err;
|
||||
char value[PATH_MAX];
|
||||
};
|
||||
|
||||
/* syscalls where the first arg is a string */
|
||||
#define SYS_OPEN 2
|
||||
#define SYS_STAT 4
|
||||
#define SYS_LSTAT 6
|
||||
#define SYS_ACCESS 21
|
||||
#define SYS_EXECVE 59
|
||||
#define SYS_TRUNCATE 76
|
||||
#define SYS_CHDIR 80
|
||||
#define SYS_RENAME 82
|
||||
#define SYS_MKDIR 83
|
||||
#define SYS_RMDIR 84
|
||||
#define SYS_CREAT 85
|
||||
#define SYS_LINK 86
|
||||
#define SYS_UNLINK 87
|
||||
#define SYS_SYMLINK 88
|
||||
#define SYS_READLINK 89
|
||||
#define SYS_CHMOD 90
|
||||
#define SYS_CHOWN 92
|
||||
#define SYS_LCHOWN 94
|
||||
#define SYS_MKNOD 133
|
||||
#define SYS_STATFS 137
|
||||
#define SYS_PIVOT_ROOT 155
|
||||
#define SYS_CHROOT 161
|
||||
#define SYS_ACCT 163
|
||||
#define SYS_SWAPON 167
|
||||
#define SYS_SWAPOFF 168
|
||||
#define SYS_DELETE_MODULE 176
|
||||
#define SYS_SETXATTR 188
|
||||
#define SYS_LSETXATTR 189
|
||||
#define SYS_GETXATTR 191
|
||||
#define SYS_LGETXATTR 192
|
||||
#define SYS_LISTXATTR 194
|
||||
#define SYS_LLISTXATTR 195
|
||||
#define SYS_REMOVEXATTR 197
|
||||
#define SYS_LREMOVEXATTR 198
|
||||
#define SYS_MQ_OPEN 240
|
||||
#define SYS_MQ_UNLINK 241
|
||||
#define SYS_ADD_KEY 248
|
||||
#define SYS_REQUEST_KEY 249
|
||||
#define SYS_SYMLINKAT 266
|
||||
#define SYS_MEMFD_CREATE 319
|
||||
|
||||
/* syscalls where the first arg is a string */
|
||||
|
||||
#define SYS_PWRITE64 18
|
||||
#define SYS_EXECVE 59
|
||||
#define SYS_RENAME 82
|
||||
#define SYS_QUOTACTL 179
|
||||
#define SYS_FSETXATTR 190
|
||||
#define SYS_FGETXATTR 193
|
||||
#define SYS_FREMOVEXATTR 199
|
||||
#define SYS_MQ_TIMEDSEND 242
|
||||
#define SYS_REQUEST_KEY 249
|
||||
#define SYS_INOTIFY_ADD_WATCH 254
|
||||
#define SYS_OPENAT 257
|
||||
#define SYS_MKDIRAT 258
|
||||
#define SYS_MKNODAT 259
|
||||
#define SYS_FCHOWNAT 260
|
||||
#define SYS_FUTIMESAT 261
|
||||
#define SYS_NEWFSTATAT 262
|
||||
#define SYS_UNLINKAT 263
|
||||
#define SYS_RENAMEAT 264
|
||||
#define SYS_LINKAT 265
|
||||
#define SYS_READLINKAT 267
|
||||
#define SYS_FCHMODAT 268
|
||||
#define SYS_FACCESSAT 269
|
||||
#define SYS_UTIMENSAT 280
|
||||
#define SYS_NAME_TO_HANDLE_AT 303
|
||||
#define SYS_FINIT_MODULE 313
|
||||
#define SYS_RENAMEAT2 316
|
||||
#define SYS_EXECVEAT 322
|
||||
#define SYS_STATX 332
|
||||
|
||||
pid_filter(pids_filtered);
|
||||
|
||||
struct augmented_args_filename {
|
||||
@ -127,12 +60,48 @@ struct augmented_args_filename {
|
||||
|
||||
bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1);
|
||||
|
||||
static inline
|
||||
unsigned int augmented_filename__read(struct augmented_filename *augmented_filename,
|
||||
const void *filename_arg, unsigned int filename_len)
|
||||
{
|
||||
unsigned int len = sizeof(*augmented_filename);
|
||||
int size = probe_read_str(&augmented_filename->value, filename_len, filename_arg);
|
||||
|
||||
augmented_filename->size = augmented_filename->err = 0;
|
||||
/*
|
||||
* probe_read_str may return < 0, e.g. -EFAULT
|
||||
* So we leave that in the augmented_filename->size that userspace will
|
||||
*/
|
||||
if (size > 0) {
|
||||
len -= sizeof(augmented_filename->value) - size;
|
||||
len &= sizeof(augmented_filename->value) - 1;
|
||||
augmented_filename->size = size;
|
||||
} else {
|
||||
/*
|
||||
* So that username notice the error while still being able
|
||||
* to skip this augmented arg record
|
||||
*/
|
||||
augmented_filename->err = size;
|
||||
len = offsetof(struct augmented_filename, value);
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
SEC("raw_syscalls:sys_enter")
|
||||
int sys_enter(struct syscall_enter_args *args)
|
||||
{
|
||||
struct augmented_args_filename *augmented_args;
|
||||
unsigned int len = sizeof(*augmented_args);
|
||||
const void *filename_arg = NULL;
|
||||
/*
|
||||
* We start len, the amount of data that will be in the perf ring
|
||||
* buffer, if this is not filtered out by one of pid_filter__has(),
|
||||
* syscall->enabled, etc, with the non-augmented raw syscall payload,
|
||||
* i.e. sizeof(augmented_args->args).
|
||||
*
|
||||
* We'll add to this as we add augmented syscalls right after that
|
||||
* initial, non-augmented raw_syscalls:sys_enter payload.
|
||||
*/
|
||||
unsigned int len = sizeof(augmented_args->args);
|
||||
struct syscall *syscall;
|
||||
int key = 0;
|
||||
|
||||
@ -189,101 +158,66 @@ int sys_enter(struct syscall_enter_args *args)
|
||||
* after the ctx memory access to prevent their down stream merging.
|
||||
*/
|
||||
/*
|
||||
* This table of what args are strings will be provided by userspace,
|
||||
* in the syscalls map, i.e. we will already have to do the lookup to
|
||||
* see if this specific syscall is filtered, so we can as well get more
|
||||
* info about what syscall args are strings or pointers, and how many
|
||||
* bytes to copy, per arg, etc.
|
||||
* For now copy just the first string arg, we need to improve the protocol
|
||||
* and have more than one.
|
||||
*
|
||||
* For now hard code it, till we have all the basic mechanisms in place
|
||||
* to automate everything and make the kernel part be completely driven
|
||||
* by information obtained in userspace for each kernel version and
|
||||
* processor architecture, making the kernel part the same no matter what
|
||||
* kernel version or processor architecture it runs on.
|
||||
*/
|
||||
switch (augmented_args->args.syscall_nr) {
|
||||
case SYS_ACCT:
|
||||
case SYS_ADD_KEY:
|
||||
case SYS_CHDIR:
|
||||
case SYS_CHMOD:
|
||||
case SYS_CHOWN:
|
||||
case SYS_CHROOT:
|
||||
case SYS_CREAT:
|
||||
case SYS_DELETE_MODULE:
|
||||
case SYS_EXECVE:
|
||||
case SYS_GETXATTR:
|
||||
case SYS_LCHOWN:
|
||||
case SYS_LGETXATTR:
|
||||
case SYS_LINK:
|
||||
case SYS_LISTXATTR:
|
||||
case SYS_LLISTXATTR:
|
||||
case SYS_LREMOVEXATTR:
|
||||
case SYS_LSETXATTR:
|
||||
case SYS_LSTAT:
|
||||
case SYS_MEMFD_CREATE:
|
||||
case SYS_MKDIR:
|
||||
case SYS_MKNOD:
|
||||
case SYS_MQ_OPEN:
|
||||
case SYS_MQ_UNLINK:
|
||||
case SYS_PIVOT_ROOT:
|
||||
case SYS_READLINK:
|
||||
case SYS_REMOVEXATTR:
|
||||
case SYS_RENAME:
|
||||
case SYS_REQUEST_KEY:
|
||||
case SYS_RMDIR:
|
||||
case SYS_SETXATTR:
|
||||
case SYS_STAT:
|
||||
case SYS_STATFS:
|
||||
case SYS_SWAPOFF:
|
||||
case SYS_SWAPON:
|
||||
case SYS_SYMLINK:
|
||||
case SYS_SYMLINKAT:
|
||||
case SYS_TRUNCATE:
|
||||
case SYS_UNLINK:
|
||||
case SYS_ACCESS:
|
||||
case SYS_OPEN: filename_arg = (const void *)args->args[0];
|
||||
* Using the unrolled loop is not working, only when we do it manually,
|
||||
* check this out later...
|
||||
|
||||
u8 arg;
|
||||
#pragma clang loop unroll(full)
|
||||
for (arg = 0; arg < 6; ++arg) {
|
||||
if (syscall->string_args_len[arg] != 0) {
|
||||
filename_len = syscall->string_args_len[arg];
|
||||
filename_arg = (const void *)args->args[arg];
|
||||
__asm__ __volatile__("": : :"memory");
|
||||
break;
|
||||
case SYS_EXECVEAT:
|
||||
case SYS_FACCESSAT:
|
||||
case SYS_FCHMODAT:
|
||||
case SYS_FCHOWNAT:
|
||||
case SYS_FGETXATTR:
|
||||
case SYS_FINIT_MODULE:
|
||||
case SYS_FREMOVEXATTR:
|
||||
case SYS_FSETXATTR:
|
||||
case SYS_FUTIMESAT:
|
||||
case SYS_INOTIFY_ADD_WATCH:
|
||||
case SYS_LINKAT:
|
||||
case SYS_MKDIRAT:
|
||||
case SYS_MKNODAT:
|
||||
case SYS_MQ_TIMEDSEND:
|
||||
case SYS_NAME_TO_HANDLE_AT:
|
||||
case SYS_NEWFSTATAT:
|
||||
case SYS_PWRITE64:
|
||||
case SYS_QUOTACTL:
|
||||
case SYS_READLINKAT:
|
||||
case SYS_RENAMEAT:
|
||||
case SYS_RENAMEAT2:
|
||||
case SYS_STATX:
|
||||
case SYS_UNLINKAT:
|
||||
case SYS_UTIMENSAT:
|
||||
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
|
||||
break;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (filename_arg != NULL) {
|
||||
augmented_args->filename.reserved = 0;
|
||||
augmented_args->filename.size = probe_read_str(&augmented_args->filename.value,
|
||||
sizeof(augmented_args->filename.value),
|
||||
filename_arg);
|
||||
if (augmented_args->filename.size < sizeof(augmented_args->filename.value)) {
|
||||
len -= sizeof(augmented_args->filename.value) - augmented_args->filename.size;
|
||||
len &= sizeof(augmented_args->filename.value) - 1;
|
||||
}
|
||||
} else {
|
||||
len = sizeof(augmented_args->args);
|
||||
}
|
||||
verifier log:
|
||||
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
37: (69) r3 = *(u16 *)(r0 +2)
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1_w=inv0 R2_w=map_value(id=0,off=2,ks=4,vs=14,imm=0) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
38: (55) if r3 != 0x0 goto pc+5
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1=inv0 R2=map_value(id=0,off=2,ks=4,vs=14,imm=0) R3=inv0 R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
39: (b7) r1 = 1
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
40: (bf) r2 = r0
|
||||
41: (07) r2 += 4
|
||||
42: (69) r3 = *(u16 *)(r0 +4)
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1_w=inv1 R2_w=map_value(id=0,off=4,ks=4,vs=14,imm=0) R3_w=inv0 R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
43: (15) if r3 == 0x0 goto pc+32
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1=inv1 R2=map_value(id=0,off=4,ks=4,vs=14,imm=0) R3=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
; filename_arg = (const void *)args->args[arg];
|
||||
44: (67) r1 <<= 3
|
||||
45: (bf) r3 = r6
|
||||
46: (0f) r3 += r1
|
||||
47: (b7) r5 = 64
|
||||
48: (79) r3 = *(u64 *)(r3 +16)
|
||||
dereference of modified ctx ptr R3 off=8 disallowed
|
||||
processed 46 insns (limit 1000000) max_states_per_insn 0 total_states 12 peak_states 12 mark_read 7
|
||||
*/
|
||||
|
||||
#define __loop_iter(arg) \
|
||||
if (syscall->string_args_len[arg] != 0) { \
|
||||
unsigned int filename_len = syscall->string_args_len[arg]; \
|
||||
const void *filename_arg = (const void *)args->args[arg]; \
|
||||
if (filename_len <= sizeof(augmented_args->filename.value)) \
|
||||
len += augmented_filename__read(&augmented_args->filename, filename_arg, filename_len);
|
||||
#define loop_iter_first() __loop_iter(0); }
|
||||
#define loop_iter(arg) else __loop_iter(arg); }
|
||||
#define loop_iter_last(arg) else __loop_iter(arg); __asm__ __volatile__("": : :"memory"); }
|
||||
|
||||
loop_iter_first()
|
||||
loop_iter(1)
|
||||
loop_iter(2)
|
||||
loop_iter(3)
|
||||
loop_iter(4)
|
||||
loop_iter_last(5)
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
|
||||
|
@ -45,10 +45,12 @@
|
||||
static char jit_path[PATH_MAX];
|
||||
static void *marker_addr;
|
||||
|
||||
#ifndef HAVE_GETTID
|
||||
static inline pid_t gettid(void)
|
||||
{
|
||||
return (pid_t)syscall(__NR_gettid);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int get_e_machine(struct jitheader *hdr)
|
||||
{
|
||||
|
@ -1,5 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/string.h>
|
||||
#include <sys/types.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
@ -162,8 +163,7 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
|
||||
result[i] = '\0';
|
||||
} else {
|
||||
/* fallback case */
|
||||
size_t file_name_len = strlen(file_name);
|
||||
strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
|
||||
strlcpy(result, file_name, max_length);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -104,11 +104,6 @@ fix_buildid_cache_permissions()

	USER_HOME=$(bash <<< "echo ~$SUDO_USER")

	if [ "$HOME" != "$USER_HOME" ] ; then
		echo "Fix unnecessary because root has a home: $HOME" >&2
		exit 1
	fi

	echo "Fixing buildid cache permissions"

	find "$USER_HOME/.debug" -xdev -type d ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
|
||||
|
@@ -18,6 +18,7 @@
#include "util/bpf-loader.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <errno.h>
|
||||
|
@@ -26,7 +26,7 @@ static inline unsigned long long rdclock(void)
}

#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
#define MAX_NR_CPUS 2048
#endif

extern const char *input_name;
@@ -61,6 +61,8 @@ struct record_opts {
	bool record_switch_events;
	bool all_kernel;
	bool all_user;
	bool kernel_callchains;
	bool user_callchains;
	bool tail_synthesize;
	bool overwrite;
	bool ignore_missing_thread;
|
||||
|
@ -0,0 +1,44 @@
|
||||
[
|
||||
{
|
||||
"EventCode": "0x02",
|
||||
"EventName": "uncore_hisi_ddrc.flux_wcmd",
|
||||
"BriefDescription": "DDRC write commands",
|
||||
"PublicDescription": "DDRC write commands",
|
||||
"Unit": "hisi_sccl,ddrc",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x03",
|
||||
"EventName": "uncore_hisi_ddrc.flux_rcmd",
|
||||
"BriefDescription": "DDRC read commands",
|
||||
"PublicDescription": "DDRC read commands",
|
||||
"Unit": "hisi_sccl,ddrc",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x04",
|
||||
"EventName": "uncore_hisi_ddrc.flux_wr",
|
||||
"BriefDescription": "DDRC precharge commands",
|
||||
"PublicDescription": "DDRC precharge commands",
|
||||
"Unit": "hisi_sccl,ddrc",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x05",
|
||||
"EventName": "uncore_hisi_ddrc.act_cmd",
|
||||
"BriefDescription": "DDRC active commands",
|
||||
"PublicDescription": "DDRC active commands",
|
||||
"Unit": "hisi_sccl,ddrc",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x06",
|
||||
"EventName": "uncore_hisi_ddrc.rnk_chg",
|
||||
"BriefDescription": "DDRC rank commands",
|
||||
"PublicDescription": "DDRC rank commands",
|
||||
"Unit": "hisi_sccl,ddrc",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x07",
|
||||
"EventName": "uncore_hisi_ddrc.rw_chg",
|
||||
"BriefDescription": "DDRC read and write changes",
|
||||
"PublicDescription": "DDRC read and write changes",
|
||||
"Unit": "hisi_sccl,ddrc",
|
||||
},
|
||||
]
|
@ -0,0 +1,51 @@
|
||||
[
|
||||
{
|
||||
"EventCode": "0x00",
|
||||
"EventName": "uncore_hisi_hha.rx_ops_num",
|
||||
"BriefDescription": "The number of all operations received by the HHA",
|
||||
"PublicDescription": "The number of all operations received by the HHA",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x01",
|
||||
"EventName": "uncore_hisi_hha.rx_outer",
|
||||
"BriefDescription": "The number of all operations received by the HHA from another socket",
|
||||
"PublicDescription": "The number of all operations received by the HHA from another socket",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x02",
|
||||
"EventName": "uncore_hisi_hha.rx_sccl",
|
||||
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
|
||||
"PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x1c",
|
||||
"EventName": "uncore_hisi_hha.rd_ddr_64b",
|
||||
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
|
||||
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x1d",
|
||||
"EventName": "uncore_hisi_hha.wr_dr_64b",
|
||||
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
|
||||
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x1e",
|
||||
"EventName": "uncore_hisi_hha.rd_ddr_128b",
|
||||
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
|
||||
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x1f",
|
||||
"EventName": "uncore_hisi_hha.wr_ddr_128b",
|
||||
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
|
||||
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
|
||||
"Unit": "hisi_sccl,hha",
|
||||
},
|
||||
]
|
@ -0,0 +1,37 @@
|
||||
[
|
||||
{
|
||||
"EventCode": "0x00",
|
||||
"EventName": "uncore_hisi_l3c.rd_cpipe",
|
||||
"BriefDescription": "Total read accesses",
|
||||
"PublicDescription": "Total read accesses",
|
||||
"Unit": "hisi_sccl,l3c",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x01",
|
||||
"EventName": "uncore_hisi_l3c.wr_cpipe",
|
||||
"BriefDescription": "Total write accesses",
|
||||
"PublicDescription": "Total write accesses",
|
||||
"Unit": "hisi_sccl,l3c",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x02",
|
||||
"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
|
||||
"BriefDescription": "Total read hits",
|
||||
"PublicDescription": "Total read hits",
|
||||
"Unit": "hisi_sccl,l3c",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x03",
|
||||
"EventName": "uncore_hisi_l3c.wr_hit_cpipe",
|
||||
"BriefDescription": "Total write hits",
|
||||
"PublicDescription": "Total write hits",
|
||||
"Unit": "hisi_sccl,l3c",
|
||||
},
|
||||
{
|
||||
"EventCode": "0x04",
|
||||
"EventName": "uncore_hisi_l3c.victim_num",
|
||||
"BriefDescription": "l3c precharge commands",
|
||||
"PublicDescription": "l3c precharge commands",
|
||||
"Unit": "hisi_sccl,l3c",
|
||||
},
|
||||
]
|
@ -314,13 +314,13 @@
|
||||
"MetricName": "DRAM_BW_Use"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
|
||||
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x35\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
|
||||
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
|
||||
"MetricGroup": "Memory_Lat",
|
||||
"MetricName": "DRAM_Read_Latency"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
|
||||
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1\\\\\\,config\\=0x40433@",
|
||||
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
|
||||
"MetricGroup": "Memory_BW",
|
||||
"MetricName": "DRAM_Parallel_Reads"
|
||||
|
@ -314,35 +314,17 @@
|
||||
"MetricName": "DRAM_BW_Use"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
|
||||
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x35\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
|
||||
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
|
||||
"MetricGroup": "Memory_Lat",
|
||||
"MetricName": "DRAM_Read_Latency"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
|
||||
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1\\\\\\,config\\=0x40433@",
|
||||
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
|
||||
"MetricGroup": "Memory_BW",
|
||||
"MetricName": "DRAM_Parallel_Reads"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
|
||||
"BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
|
||||
"MetricGroup": "Memory_Lat",
|
||||
"MetricName": "MEM_PMM_Read_Latency"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
|
||||
"BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
|
||||
"MetricGroup": "Memory_BW",
|
||||
"MetricName": "PMM_Read_BW"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
|
||||
"BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
|
||||
"MetricGroup": "Memory_BW",
|
||||
"MetricName": "PMM_Write_BW"
|
||||
},
|
||||
{
|
||||
"MetricExpr": "cha_0@event\\=0x0@",
|
||||
"BriefDescription": "Socket actual clocks when any core is active on that socket",
|
||||
|
@@ -236,6 +236,9 @@ static struct map {
	{ "CPU-M-CF", "cpum_cf" },
	{ "CPU-M-SF", "cpum_sf" },
	{ "UPI LL", "uncore_upi" },
	{ "hisi_sccl,ddrc", "hisi_sccl,ddrc" },
	{ "hisi_sccl,hha", "hisi_sccl,hha" },
	{ "hisi_sccl,l3c", "hisi_sccl,l3c" },
	{}
};

@@ -841,7 +844,7 @@ static void create_empty_mapping(const char *output_file)
		_Exit(1);
	}

	fprintf(outfp, "#include \"../../pmu-events/pmu-events.h\"\n");
	fprintf(outfp, "#include \"pmu-events/pmu-events.h\"\n");
	print_mapping_table_prefix(outfp);
	print_mapping_table_suffix(outfp);
	fclose(outfp);
@@ -1096,7 +1099,7 @@ int main(int argc, char *argv[])
	}

	/* Include pmu-events.h first */
	fprintf(eventsfp, "#include \"../../pmu-events/pmu-events.h\"\n");
	fprintf(eventsfp, "#include \"pmu-events/pmu-events.h\"\n");

	/*
	 * The mapfile allows multiple CPUids to point to the same JSON file,
|
||||
|
@ -27,18 +27,31 @@ import datetime
|
||||
#
|
||||
# fedora:
|
||||
#
|
||||
# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
|
||||
# $ sudo yum install postgresql postgresql-server qt-postgresql
|
||||
# $ sudo su - postgres -c initdb
|
||||
# $ sudo service postgresql start
|
||||
# $ sudo su - postgres
|
||||
# $ createuser <your user id here>
|
||||
# $ createuser -s <your user id here> # Older versions may not support -s, in which case answer the prompt below:
|
||||
# Shall the new role be a superuser? (y/n) y
|
||||
# $ sudo yum install python-pyside
|
||||
#
|
||||
# Alternately, to use Python3 and/or pyside 2, one of the following:
|
||||
# $ sudo yum install python3-pyside
|
||||
# $ pip install --user PySide2
|
||||
# $ pip3 install --user PySide2
|
||||
#
|
||||
# ubuntu:
|
||||
#
|
||||
# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
|
||||
# $ sudo apt-get install postgresql
|
||||
# $ sudo su - postgres
|
||||
# $ createuser -s <your user id here>
|
||||
# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql
|
||||
#
|
||||
# Alternately, to use Python3 and/or pyside 2, one of the following:
|
||||
#
|
||||
# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql
|
||||
# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql
|
||||
# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql
|
||||
#
|
||||
# An example of using this script with Intel PT:
|
||||
#
|
||||
@ -199,7 +212,16 @@ import datetime
|
||||
# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
|
||||
# call_path_id = query.value(6)
|
||||
|
||||
from PySide.QtSql import *
|
||||
pyside_version_1 = True
|
||||
if not "pyside-version-1" in sys.argv:
|
||||
try:
|
||||
from PySide2.QtSql import *
|
||||
pyside_version_1 = False
|
||||
except:
|
||||
pass
|
||||
|
||||
if pyside_version_1:
|
||||
from PySide.QtSql import *
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
def toserverstr(str):
|
||||
@ -255,11 +277,12 @@ def printdate(*args, **kw_args):
|
||||
print(datetime.datetime.today(), *args, sep=' ', **kw_args)
|
||||
|
||||
def usage():
|
||||
printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
|
||||
printerr("where: columns 'all' or 'branches'")
|
||||
printerr(" calls 'calls' => create calls and call_paths table")
|
||||
printerr(" callchains 'callchains' => create call_paths table")
|
||||
raise Exception("Too few arguments")
|
||||
printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
|
||||
printerr("where: columns 'all' or 'branches'");
|
||||
printerr(" calls 'calls' => create calls and call_paths table");
|
||||
printerr(" callchains 'callchains' => create call_paths table");
|
||||
printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1");
|
||||
raise Exception("Too few or bad arguments")
|
||||
|
||||
if (len(sys.argv) < 2):
|
||||
usage()
|
||||
@ -281,6 +304,8 @@ for i in range(3,len(sys.argv)):
|
||||
perf_db_export_calls = True
|
||||
elif (sys.argv[i] == "callchains"):
|
||||
perf_db_export_callchains = True
|
||||
elif (sys.argv[i] == "pyside-version-1"):
|
||||
pass
|
||||
else:
|
||||
usage()
|
||||
|
||||
@ -369,7 +394,9 @@ if branches:
|
||||
'to_ip bigint,'
|
||||
'branch_type integer,'
|
||||
'in_tx boolean,'
|
||||
'call_path_id bigint)')
|
||||
'call_path_id bigint,'
|
||||
'insn_count bigint,'
|
||||
'cyc_count bigint)')
|
||||
else:
|
||||
do_query(query, 'CREATE TABLE samples ('
|
||||
'id bigint NOT NULL,'
|
||||
@ -393,7 +420,9 @@ else:
|
||||
'data_src bigint,'
|
||||
'branch_type integer,'
|
||||
'in_tx boolean,'
|
||||
'call_path_id bigint)')
|
||||
'call_path_id bigint,'
|
||||
'insn_count bigint,'
|
||||
'cyc_count bigint)')
|
||||
|
||||
if perf_db_export_calls or perf_db_export_callchains:
|
||||
do_query(query, 'CREATE TABLE call_paths ('
|
||||
@ -414,7 +443,41 @@ if perf_db_export_calls:
|
||||
'return_id bigint,'
|
||||
'parent_call_path_id bigint,'
|
||||
'flags integer,'
|
||||
'parent_id bigint)')
|
||||
'parent_id bigint,'
|
||||
'insn_count bigint,'
|
||||
'cyc_count bigint)')
|
||||
|
||||
do_query(query, 'CREATE TABLE ptwrite ('
|
||||
'id bigint NOT NULL,'
|
||||
'payload bigint,'
|
||||
'exact_ip boolean)')
|
||||
|
||||
do_query(query, 'CREATE TABLE cbr ('
|
||||
'id bigint NOT NULL,'
|
||||
'cbr integer,'
|
||||
'mhz integer,'
|
||||
'percent integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE mwait ('
|
||||
'id bigint NOT NULL,'
|
||||
'hints integer,'
|
||||
'extensions integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE pwre ('
|
||||
'id bigint NOT NULL,'
|
||||
'cstate integer,'
|
||||
'subcstate integer,'
|
||||
'hw boolean)')
|
||||
|
||||
do_query(query, 'CREATE TABLE exstop ('
|
||||
'id bigint NOT NULL,'
|
||||
'exact_ip boolean)')
|
||||
|
||||
do_query(query, 'CREATE TABLE pwrx ('
|
||||
'id bigint NOT NULL,'
|
||||
'deepest_cstate integer,'
|
||||
'last_cstate integer,'
|
||||
'wake_reason integer)')
|
||||
|
||||
do_query(query, 'CREATE VIEW machines_view AS '
|
||||
'SELECT '
|
||||
@ -496,6 +559,9 @@ if perf_db_export_calls:
|
||||
'return_time,'
|
||||
'return_time - call_time AS elapsed_time,'
|
||||
'branch_count,'
|
||||
'insn_count,'
|
||||
'cyc_count,'
|
||||
'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,'
|
||||
'call_id,'
|
||||
'return_id,'
|
||||
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
|
||||
@ -521,9 +587,110 @@ do_query(query, 'CREATE VIEW samples_view AS '
|
||||
'to_sym_offset,'
|
||||
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
|
||||
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
|
||||
'in_tx'
|
||||
'in_tx,'
|
||||
'insn_count,'
|
||||
'cyc_count,'
|
||||
'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC'
|
||||
' FROM samples')
|
||||
|
||||
do_query(query, 'CREATE VIEW ptwrite_view AS '
|
||||
'SELECT '
|
||||
'ptwrite.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'to_hex(payload) AS payload_hex,'
|
||||
'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip'
|
||||
' FROM ptwrite'
|
||||
' INNER JOIN samples ON samples.id = ptwrite.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW cbr_view AS '
|
||||
'SELECT '
|
||||
'cbr.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'cbr,'
|
||||
'mhz,'
|
||||
'percent'
|
||||
' FROM cbr'
|
||||
' INNER JOIN samples ON samples.id = cbr.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW mwait_view AS '
|
||||
'SELECT '
|
||||
'mwait.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'to_hex(hints) AS hints_hex,'
|
||||
'to_hex(extensions) AS extensions_hex'
|
||||
' FROM mwait'
|
||||
' INNER JOIN samples ON samples.id = mwait.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW pwre_view AS '
|
||||
'SELECT '
|
||||
'pwre.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'cstate,'
|
||||
'subcstate,'
|
||||
'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw'
|
||||
' FROM pwre'
|
||||
' INNER JOIN samples ON samples.id = pwre.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW exstop_view AS '
|
||||
'SELECT '
|
||||
'exstop.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip'
|
||||
' FROM exstop'
|
||||
' INNER JOIN samples ON samples.id = exstop.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW pwrx_view AS '
|
||||
'SELECT '
|
||||
'pwrx.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'deepest_cstate,'
|
||||
'last_cstate,'
|
||||
'CASE WHEN wake_reason=1 THEN \'Interrupt\''
|
||||
' WHEN wake_reason=2 THEN \'Timer Deadline\''
|
||||
' WHEN wake_reason=4 THEN \'Monitored Address\''
|
||||
' WHEN wake_reason=8 THEN \'HW\''
|
||||
' ELSE CAST ( wake_reason AS VARCHAR(2) )'
|
||||
'END AS wake_reason'
|
||||
' FROM pwrx'
|
||||
' INNER JOIN samples ON samples.id = pwrx.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW power_events_view AS '
|
||||
'SELECT '
|
||||
'samples.id,'
|
||||
'samples.time,'
|
||||
'samples.cpu,'
|
||||
'selected_events.name AS event,'
|
||||
'FORMAT(\'%6s\', cbr.cbr) AS cbr,'
|
||||
'FORMAT(\'%6s\', cbr.mhz) AS MHz,'
|
||||
'FORMAT(\'%5s\', cbr.percent) AS percent,'
|
||||
'to_hex(mwait.hints) AS hints_hex,'
|
||||
'to_hex(mwait.extensions) AS extensions_hex,'
|
||||
'FORMAT(\'%3s\', pwre.cstate) AS cstate,'
|
||||
'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,'
|
||||
'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,'
|
||||
'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' ELSE NULL END AS exact_ip,'
|
||||
'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,'
|
||||
'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,'
|
||||
'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\''
|
||||
' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\''
|
||||
' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\''
|
||||
' WHEN pwrx.wake_reason=8 THEN \'HW\''
|
||||
' ELSE FORMAT(\'%2s\', pwrx.wake_reason)'
|
||||
'END AS wake_reason'
|
||||
' FROM cbr'
|
||||
' FULL JOIN mwait ON mwait.id = cbr.id'
|
||||
' FULL JOIN pwre ON pwre.id = cbr.id'
|
||||
' FULL JOIN exstop ON exstop.id = cbr.id'
|
||||
' FULL JOIN pwrx ON pwrx.id = cbr.id'
|
||||
' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)'
|
||||
' INNER JOIN selected_events ON selected_events.id = samples.evsel_id'
|
||||
' ORDER BY samples.id')
|
||||
|
||||
file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
|
||||
file_trailer = b"\377\377"
|
||||
@ -583,6 +750,12 @@ if perf_db_export_calls or perf_db_export_callchains:
|
||||
call_path_file = open_output_file("call_path_table.bin")
|
||||
if perf_db_export_calls:
|
||||
call_file = open_output_file("call_table.bin")
|
||||
ptwrite_file = open_output_file("ptwrite_table.bin")
|
||||
cbr_file = open_output_file("cbr_table.bin")
|
||||
mwait_file = open_output_file("mwait_table.bin")
|
||||
pwre_file = open_output_file("pwre_table.bin")
|
||||
exstop_file = open_output_file("exstop_table.bin")
|
||||
pwrx_file = open_output_file("pwrx_table.bin")
|
||||
|
||||
def trace_begin():
|
||||
printdate("Writing to intermediate files...")
|
||||
@ -593,13 +766,23 @@ def trace_begin():
|
||||
comm_table(0, "unknown")
|
||||
dso_table(0, 0, "unknown", "unknown", "")
|
||||
symbol_table(0, 0, 0, 0, 0, "unknown")
|
||||
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
if perf_db_export_calls or perf_db_export_callchains:
|
||||
call_path_table(0, 0, 0, 0)
|
||||
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
|
||||
unhandled_count = 0
|
||||
|
||||
def is_table_empty(table_name):
|
||||
do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1');
|
||||
if query.next():
|
||||
return False
|
||||
return True
|
||||
|
||||
def drop(table_name):
|
||||
do_query(query, 'DROP VIEW ' + table_name + '_view');
|
||||
do_query(query, 'DROP TABLE ' + table_name);
|
||||
|
||||
def trace_end():
|
||||
printdate("Copying to database...")
|
||||
copy_output_file(evsel_file, "selected_events")
|
||||
@ -615,6 +798,12 @@ def trace_end():
|
||||
copy_output_file(call_path_file, "call_paths")
|
||||
if perf_db_export_calls:
|
||||
copy_output_file(call_file, "calls")
|
||||
copy_output_file(ptwrite_file, "ptwrite")
|
||||
copy_output_file(cbr_file, "cbr")
|
||||
copy_output_file(mwait_file, "mwait")
|
||||
copy_output_file(pwre_file, "pwre")
|
||||
copy_output_file(exstop_file, "exstop")
|
||||
copy_output_file(pwrx_file, "pwrx")
|
||||
|
||||
printdate("Removing intermediate files...")
|
||||
remove_output_file(evsel_file)
|
||||
@ -630,6 +819,12 @@ def trace_end():
|
||||
remove_output_file(call_path_file)
|
||||
if perf_db_export_calls:
|
||||
remove_output_file(call_file)
|
||||
remove_output_file(ptwrite_file)
|
||||
remove_output_file(cbr_file)
|
||||
remove_output_file(mwait_file)
|
||||
remove_output_file(pwre_file)
|
||||
remove_output_file(exstop_file)
|
||||
remove_output_file(pwrx_file)
|
||||
os.rmdir(output_dir_name)
|
||||
printdate("Adding primary keys")
|
||||
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
|
||||
@ -645,6 +840,12 @@ def trace_end():
|
||||
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
|
||||
if perf_db_export_calls:
|
||||
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
|
||||
do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)')
|
||||
do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)')
|
||||
do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)')
|
||||
do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)')
|
||||
do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)')
|
||||
do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)')
|
||||
|
||||
printdate("Adding foreign keys")
|
||||
do_query(query, 'ALTER TABLE threads '
|
||||
@ -680,6 +881,30 @@ def trace_end():
|
||||
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
|
||||
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
|
||||
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
|
||||
do_query(query, 'ALTER TABLE ptwrite '
|
||||
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
|
||||
do_query(query, 'ALTER TABLE cbr '
|
||||
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
|
||||
do_query(query, 'ALTER TABLE mwait '
|
||||
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
|
||||
do_query(query, 'ALTER TABLE pwre '
|
||||
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
|
||||
do_query(query, 'ALTER TABLE exstop '
|
||||
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
|
||||
do_query(query, 'ALTER TABLE pwrx '
|
||||
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
|
||||
|
||||
printdate("Dropping unused tables")
|
||||
if is_table_empty("ptwrite"):
|
||||
drop("ptwrite")
|
||||
if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
|
||||
drop("mwait")
|
||||
drop("pwre")
|
||||
drop("exstop")
|
||||
drop("pwrx")
|
||||
do_query(query, 'DROP VIEW power_events_view');
|
||||
if is_table_empty("cbr"):
|
||||
drop("cbr")
|
||||
|
||||
if (unhandled_count):
|
||||
printdate("Warning: ", unhandled_count, " unhandled events")
|
||||
@ -747,11 +972,11 @@ def branch_type_table(branch_type, name, *x):
|
||||
value = struct.pack(fmt, 2, 4, branch_type, n, name)
|
||||
branch_type_file.write(value)
|
||||
|
||||
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
|
||||
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x):
|
||||
if branches:
|
||||
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
|
||||
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
|
||||
else:
|
||||
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
|
||||
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
|
||||
sample_file.write(value)
|
||||
|
||||
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
|
||||
@@ -759,7 +984,70 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
	value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
	call_path_file.write(value)

def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
	fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
	value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x):
	fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq"
	value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt)
	call_file.write(value)

def ptwrite(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	flags = data[0]
	payload = data[1]
	exact_ip = flags & 1
	value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip)
	ptwrite_file.write(value)

def cbr(id, raw_buf):
	data = struct.unpack_from("<BBBBII", raw_buf)
	cbr = data[0]
	MHz = (data[4] + 500) / 1000
	percent = ((cbr * 1000 / data[2]) + 5) / 10
	value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
	cbr_file.write(value)

def mwait(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hints = payload & 0xff
	extensions = (payload >> 32) & 0x3
	value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions)
	mwait_file.write(value)

def pwre(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hw = (payload >> 7) & 1
	cstate = (payload >> 12) & 0xf
	subcstate = (payload >> 8) & 0xf
	value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw)
	pwre_file.write(value)

def exstop(id, raw_buf):
	data = struct.unpack_from("<I", raw_buf)
	flags = data[0]
	exact_ip = flags & 1
	value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip)
	exstop_file.write(value)

def pwrx(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	deepest_cstate = payload & 0xf
	last_cstate = (payload >> 4) & 0xf
	wake_reason = (payload >> 8) & 0xf
	value = struct.pack("!hiqiiiiii", 4, 8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason)
	pwrx_file.write(value)

def synth_data(id, config, raw_buf, *x):
	if config == 0:
		ptwrite(id, raw_buf)
	elif config == 1:
		mwait(id, raw_buf)
	elif config == 2:
		pwre(id, raw_buf)
	elif config == 3:
		exstop(id, raw_buf)
	elif config == 4:
		pwrx(id, raw_buf)
	elif config == 5:
		cbr(id, raw_buf)
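Each of the writers above emits one PostgreSQL COPY BINARY tuple per record: a big-endian int16 field count, then for every field a 4-byte length followed by the value, which is how the new insn_count and cyc_count columns get appended to samples and calls. A minimal sketch of that encoding for bigint-only rows (pack_row is illustrative, not a helper from the script):

    import struct

    def pack_row(*bigints):
        # int16 field count, then per field: int32 length (8) + int64 value,
        # all big-endian, matching the "!h" + "iq" * n formats used above.
        fmt = "!h" + "iq" * len(bigints)
        args = [len(bigints)]
        for v in bigints:
            args += [8, v]
        return struct.pack(fmt, *args)

    assert pack_row(1, 42, 7) == struct.pack("!hiqiqiq", 3, 8, 1, 8, 42, 8, 7)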
|
||||
|
@ -21,6 +21,26 @@ import datetime
|
||||
# provides LGPL-licensed Python bindings for Qt. You will also need the package
|
||||
# libqt4-sql-sqlite for Qt sqlite3 support.
|
||||
#
|
||||
# Examples of installing pyside:
|
||||
#
|
||||
# ubuntu:
|
||||
#
|
||||
# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql
|
||||
#
|
||||
# Alternately, to use Python3 and/or pyside 2, one of the following:
|
||||
#
|
||||
# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql
|
||||
# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql
|
||||
# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql
|
||||
# fedora:
|
||||
#
|
||||
# $ sudo yum install python-pyside
|
||||
#
|
||||
# Alternately, to use Python3 and/or pyside 2, one of the following:
|
||||
# $ sudo yum install python3-pyside
|
||||
# $ pip install --user PySide2
|
||||
# $ pip3 install --user PySide2
|
||||
#
|
||||
# An example of using this script with Intel PT:
|
||||
#
|
||||
# $ perf record -e intel_pt//u ls
|
||||
@ -49,7 +69,16 @@ import datetime
|
||||
# difference is the 'transaction' column of the 'samples' table which is
|
||||
# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
|
||||
|
||||
from PySide.QtSql import *
|
||||
pyside_version_1 = True
|
||||
if not "pyside-version-1" in sys.argv:
|
||||
try:
|
||||
from PySide2.QtSql import *
|
||||
pyside_version_1 = False
|
||||
except:
|
||||
pass
|
||||
|
||||
if pyside_version_1:
|
||||
from PySide.QtSql import *
|
||||
|
||||
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
|
||||
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
|
||||
@ -69,11 +98,12 @@ def printdate(*args, **kw_args):
|
||||
print(datetime.datetime.today(), *args, sep=' ', **kw_args)
|
||||
|
||||
def usage():
|
||||
printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
|
||||
printerr("where: columns 'all' or 'branches'");
|
||||
printerr(" calls 'calls' => create calls and call_paths table");
|
||||
printerr(" callchains 'callchains' => create call_paths table");
|
||||
raise Exception("Too few arguments")
|
||||
printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
|
||||
printerr("where: columns 'all' or 'branches'");
|
||||
printerr(" calls 'calls' => create calls and call_paths table");
|
||||
printerr(" callchains 'callchains' => create call_paths table");
|
||||
printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1");
|
||||
raise Exception("Too few or bad arguments")
|
||||
|
||||
if (len(sys.argv) < 2):
|
||||
usage()
|
||||
@ -95,6 +125,8 @@ for i in range(3,len(sys.argv)):
		perf_db_export_calls = True
	elif (sys.argv[i] == "callchains"):
		perf_db_export_callchains = True
	elif (sys.argv[i] == "pyside-version-1"):
		pass
	else:
		usage()
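
To make the new 'pyside-version-1' argument concrete, a plausible invocation in the style of the script's own header comments (the script path and database name are illustrative assumptions, not taken from this diff):

# $ perf record -e intel_pt//u ls
# $ perf script -s tools/perf/scripts/python/export-to-sqlite.py example.db branches calls pyside-version-1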
|
||||
|
||||
@ -186,7 +218,9 @@ if branches:
|
||||
'to_ip bigint,'
|
||||
'branch_type integer,'
|
||||
'in_tx boolean,'
|
||||
'call_path_id bigint)')
|
||||
'call_path_id bigint,'
|
||||
'insn_count bigint,'
|
||||
'cyc_count bigint)')
|
||||
else:
|
||||
do_query(query, 'CREATE TABLE samples ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
@ -210,7 +244,9 @@ else:
|
||||
'data_src bigint,'
|
||||
'branch_type integer,'
|
||||
'in_tx boolean,'
|
||||
'call_path_id bigint)')
|
||||
'call_path_id bigint,'
|
||||
'insn_count bigint,'
|
||||
'cyc_count bigint)')
|
||||
|
||||
if perf_db_export_calls or perf_db_export_callchains:
|
||||
do_query(query, 'CREATE TABLE call_paths ('
|
||||
@ -231,7 +267,41 @@ if perf_db_export_calls:
|
||||
'return_id bigint,'
|
||||
'parent_call_path_id bigint,'
|
||||
'flags integer,'
|
||||
'parent_id bigint)')
|
||||
'parent_id bigint,'
|
||||
'insn_count bigint,'
|
||||
'cyc_count bigint)')
|
||||
|
||||
do_query(query, 'CREATE TABLE ptwrite ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
'payload bigint,'
|
||||
'exact_ip integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE cbr ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
'cbr integer,'
|
||||
'mhz integer,'
|
||||
'percent integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE mwait ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
'hints integer,'
|
||||
'extensions integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE pwre ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
'cstate integer,'
|
||||
'subcstate integer,'
|
||||
'hw integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE exstop ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
'exact_ip integer)')
|
||||
|
||||
do_query(query, 'CREATE TABLE pwrx ('
|
||||
'id integer NOT NULL PRIMARY KEY,'
|
||||
'deepest_cstate integer,'
|
||||
'last_cstate integer,'
|
||||
'wake_reason integer)')
|
||||
|
||||
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
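
As an aside, and not part of this commit, the 3.8.3 requirement can be probed from plain Python by comparing the linked SQLite library version; a minimal sketch:

import sqlite3
# sqlite3.sqlite_version_info is the version of the SQLite library in use,
# so printf() availability can be inferred by comparing against (3, 8, 3).
sqlite_has_printf = sqlite3.sqlite_version_info >= (3, 8, 3)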
|
||||
@ -327,6 +397,9 @@ if perf_db_export_calls:
|
||||
'return_time,'
|
||||
'return_time - call_time AS elapsed_time,'
|
||||
'branch_count,'
|
||||
'insn_count,'
|
||||
'cyc_count,'
|
||||
'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,'
|
||||
'call_id,'
|
||||
'return_id,'
|
||||
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
|
||||
@ -352,9 +425,108 @@ do_query(query, 'CREATE VIEW samples_view AS '
|
||||
'to_sym_offset,'
|
||||
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
|
||||
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
|
||||
'in_tx'
|
||||
'in_tx,'
|
||||
'insn_count,'
|
||||
'cyc_count,'
|
||||
'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC'
|
||||
' FROM samples')
|
||||
|
||||
do_query(query, 'CREATE VIEW ptwrite_view AS '
|
||||
'SELECT '
|
||||
'ptwrite.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
+ emit_to_hex('payload') + ' AS payload_hex,'
|
||||
'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
|
||||
' FROM ptwrite'
|
||||
' INNER JOIN samples ON samples.id = ptwrite.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW cbr_view AS '
|
||||
'SELECT '
|
||||
'cbr.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'cbr,'
|
||||
'mhz,'
|
||||
'percent'
|
||||
' FROM cbr'
|
||||
' INNER JOIN samples ON samples.id = cbr.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW mwait_view AS '
|
||||
'SELECT '
|
||||
'mwait.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
+ emit_to_hex('hints') + ' AS hints_hex,'
|
||||
+ emit_to_hex('extensions') + ' AS extensions_hex'
|
||||
' FROM mwait'
|
||||
' INNER JOIN samples ON samples.id = mwait.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW pwre_view AS '
|
||||
'SELECT '
|
||||
'pwre.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'cstate,'
|
||||
'subcstate,'
|
||||
'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw'
|
||||
' FROM pwre'
|
||||
' INNER JOIN samples ON samples.id = pwre.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW exstop_view AS '
|
||||
'SELECT '
|
||||
'exstop.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
|
||||
' FROM exstop'
|
||||
' INNER JOIN samples ON samples.id = exstop.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW pwrx_view AS '
|
||||
'SELECT '
|
||||
'pwrx.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'deepest_cstate,'
|
||||
'last_cstate,'
|
||||
'CASE WHEN wake_reason=1 THEN \'Interrupt\''
|
||||
' WHEN wake_reason=2 THEN \'Timer Deadline\''
|
||||
' WHEN wake_reason=4 THEN \'Monitored Address\''
|
||||
' WHEN wake_reason=8 THEN \'HW\''
|
||||
' ELSE wake_reason '
|
||||
'END AS wake_reason'
|
||||
' FROM pwrx'
|
||||
' INNER JOIN samples ON samples.id = pwrx.id')
|
||||
|
||||
do_query(query, 'CREATE VIEW power_events_view AS '
|
||||
'SELECT '
|
||||
'samples.id,'
|
||||
'time,'
|
||||
'cpu,'
|
||||
'selected_events.name AS event,'
|
||||
'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,'
|
||||
'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,'
|
||||
'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,'
|
||||
'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,'
|
||||
'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,'
|
||||
'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,'
|
||||
'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,'
|
||||
'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,'
|
||||
'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,'
|
||||
'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,'
|
||||
'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,'
|
||||
'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT '
|
||||
'CASE WHEN wake_reason=1 THEN \'Interrupt\''
|
||||
' WHEN wake_reason=2 THEN \'Timer Deadline\''
|
||||
' WHEN wake_reason=4 THEN \'Monitored Address\''
|
||||
' WHEN wake_reason=8 THEN \'HW\''
|
||||
' ELSE wake_reason '
|
||||
'END'
|
||||
' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason'
|
||||
' FROM samples'
|
||||
' INNER JOIN selected_events ON selected_events.id = evsel_id'
|
||||
' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')')
|
||||
|
||||
do_query(query, 'END TRANSACTION')
|
||||
|
||||
evsel_query = QSqlQuery(db)
|
||||
@ -375,15 +547,27 @@ branch_type_query = QSqlQuery(db)
|
||||
branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
|
||||
sample_query = QSqlQuery(db)
|
||||
if branches:
|
||||
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
else:
|
||||
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
if perf_db_export_calls or perf_db_export_callchains:
|
||||
call_path_query = QSqlQuery(db)
|
||||
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
|
||||
if perf_db_export_calls:
|
||||
call_query = QSqlQuery(db)
|
||||
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
|
||||
ptwrite_query = QSqlQuery(db)
|
||||
ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)")
|
||||
cbr_query = QSqlQuery(db)
|
||||
cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)")
|
||||
mwait_query = QSqlQuery(db)
|
||||
mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)")
|
||||
pwre_query = QSqlQuery(db)
|
||||
pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)")
|
||||
exstop_query = QSqlQuery(db)
|
||||
exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)")
|
||||
pwrx_query = QSqlQuery(db)
|
||||
pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)")
|
||||
|
||||
def trace_begin():
|
||||
printdate("Writing records...")
|
||||
@ -395,13 +579,23 @@ def trace_begin():
|
||||
comm_table(0, "unknown")
|
||||
dso_table(0, 0, "unknown", "unknown", "")
|
||||
symbol_table(0, 0, 0, 0, 0, "unknown")
|
||||
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
if perf_db_export_calls or perf_db_export_callchains:
|
||||
call_path_table(0, 0, 0, 0)
|
||||
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
|
||||
unhandled_count = 0
|
||||
|
||||
def is_table_empty(table_name):
|
||||
do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1');
|
||||
if query.next():
|
||||
return False
|
||||
return True
|
||||
|
||||
def drop(table_name):
|
||||
do_query(query, 'DROP VIEW ' + table_name + '_view');
|
||||
do_query(query, 'DROP TABLE ' + table_name);
|
||||
|
||||
def trace_end():
|
||||
do_query(query, 'END TRANSACTION')
|
||||
|
||||
@ -410,6 +604,18 @@ def trace_end():
|
||||
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
|
||||
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
|
||||
|
||||
printdate("Dropping unused tables")
|
||||
if is_table_empty("ptwrite"):
|
||||
drop("ptwrite")
|
||||
if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
|
||||
drop("mwait")
|
||||
drop("pwre")
|
||||
drop("exstop")
|
||||
drop("pwrx")
|
||||
do_query(query, 'DROP VIEW power_events_view');
|
||||
if is_table_empty("cbr"):
|
||||
drop("cbr")
|
||||
|
||||
if (unhandled_count):
|
||||
printdate("Warning: ", unhandled_count, " unhandled events")
|
||||
printdate("Done")
|
||||
@ -454,14 +660,91 @@ def sample_table(*x):
|
||||
if branches:
|
||||
for xx in x[0:15]:
|
||||
sample_query.addBindValue(str(xx))
|
||||
for xx in x[19:22]:
|
||||
for xx in x[19:24]:
|
||||
sample_query.addBindValue(str(xx))
|
||||
do_query_(sample_query)
|
||||
else:
|
||||
bind_exec(sample_query, 22, x)
|
||||
bind_exec(sample_query, 24, x)
|
||||
|
||||
def call_path_table(*x):
|
||||
bind_exec(call_path_query, 4, x)
|
||||
|
||||
def call_return_table(*x):
|
||||
bind_exec(call_query, 12, x)
|
||||
bind_exec(call_query, 14, x)
|
||||
|
||||
def ptwrite(id, raw_buf):
|
||||
data = struct.unpack_from("<IQ", raw_buf)
|
||||
flags = data[0]
|
||||
payload = data[1]
|
||||
exact_ip = flags & 1
|
||||
ptwrite_query.addBindValue(str(id))
|
||||
ptwrite_query.addBindValue(str(payload))
|
||||
ptwrite_query.addBindValue(str(exact_ip))
|
||||
do_query_(ptwrite_query)
|
||||
|
||||
def cbr(id, raw_buf):
|
||||
data = struct.unpack_from("<BBBBII", raw_buf)
|
||||
cbr = data[0]
|
||||
MHz = (data[4] + 500) / 1000
|
||||
percent = ((cbr * 1000 / data[2]) + 5) / 10
|
||||
cbr_query.addBindValue(str(id))
|
||||
cbr_query.addBindValue(str(cbr))
|
||||
cbr_query.addBindValue(str(MHz))
|
||||
cbr_query.addBindValue(str(percent))
|
||||
do_query_(cbr_query)
|
||||
|
||||
def mwait(id, raw_buf):
|
||||
data = struct.unpack_from("<IQ", raw_buf)
|
||||
payload = data[1]
|
||||
hints = payload & 0xff
|
||||
extensions = (payload >> 32) & 0x3
|
||||
mwait_query.addBindValue(str(id))
|
||||
mwait_query.addBindValue(str(hints))
|
||||
mwait_query.addBindValue(str(extensions))
|
||||
do_query_(mwait_query)
|
||||
|
||||
def pwre(id, raw_buf):
|
||||
data = struct.unpack_from("<IQ", raw_buf)
|
||||
payload = data[1]
|
||||
hw = (payload >> 7) & 1
|
||||
cstate = (payload >> 12) & 0xf
|
||||
subcstate = (payload >> 8) & 0xf
|
||||
pwre_query.addBindValue(str(id))
|
||||
pwre_query.addBindValue(str(cstate))
|
||||
pwre_query.addBindValue(str(subcstate))
|
||||
pwre_query.addBindValue(str(hw))
|
||||
do_query_(pwre_query)
|
||||
|
||||
def exstop(id, raw_buf):
|
||||
data = struct.unpack_from("<I", raw_buf)
|
||||
flags = data[0]
|
||||
exact_ip = flags & 1
|
||||
exstop_query.addBindValue(str(id))
|
||||
exstop_query.addBindValue(str(exact_ip))
|
||||
do_query_(exstop_query)
|
||||
|
||||
def pwrx(id, raw_buf):
|
||||
data = struct.unpack_from("<IQ", raw_buf)
|
||||
payload = data[1]
|
||||
deepest_cstate = payload & 0xf
|
||||
last_cstate = (payload >> 4) & 0xf
|
||||
wake_reason = (payload >> 8) & 0xf
|
||||
pwrx_query.addBindValue(str(id))
|
||||
pwrx_query.addBindValue(str(deepest_cstate))
|
||||
pwrx_query.addBindValue(str(last_cstate))
|
||||
pwrx_query.addBindValue(str(wake_reason))
|
||||
do_query_(pwrx_query)
|
||||
|
||||
def synth_data(id, config, raw_buf, *x):
|
||||
if config == 0:
|
||||
ptwrite(id, raw_buf)
|
||||
elif config == 1:
|
||||
mwait(id, raw_buf)
|
||||
elif config == 2:
|
||||
pwre(id, raw_buf)
|
||||
elif config == 3:
|
||||
exstop(id, raw_buf)
|
||||
elif config == 4:
|
||||
pwrx(id, raw_buf)
|
||||
elif config == 5:
|
||||
cbr(id, raw_buf)
|
||||
|
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python2
|
||||
#!/usr/bin/env python
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# exported-sql-viewer.py: view data from sql database
|
||||
# Copyright (c) 2014-2018, Intel Corporation.
|
||||
@ -91,6 +91,7 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
import weakref
|
||||
import threading
|
||||
import string
|
||||
@ -104,10 +105,23 @@ except ImportError:
|
||||
glb_nsz = 16
|
||||
import re
|
||||
import os
|
||||
from PySide.QtCore import *
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtSql import *
|
||||
|
||||
pyside_version_1 = True
|
||||
if not "--pyside-version-1" in sys.argv:
|
||||
try:
|
||||
from PySide2.QtCore import *
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtSql import *
|
||||
from PySide2.QtWidgets import *
|
||||
pyside_version_1 = False
|
||||
except:
|
||||
pass
|
||||
|
||||
if pyside_version_1:
|
||||
from PySide.QtCore import *
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtSql import *
|
||||
|
||||
from decimal import *
|
||||
from ctypes import *
|
||||
from multiprocessing import Process, Array, Value, Event
|
||||
@ -186,9 +200,10 @@ class Thread(QThread):
|
||||
|
||||
class TreeModel(QAbstractItemModel):
|
||||
|
||||
def __init__(self, glb, parent=None):
|
||||
def __init__(self, glb, params, parent=None):
|
||||
super(TreeModel, self).__init__(parent)
|
||||
self.glb = glb
|
||||
self.params = params
|
||||
self.root = self.GetRoot()
|
||||
self.last_row_read = 0
|
||||
|
||||
@ -385,6 +400,7 @@ class FindBar():
|
||||
|
||||
def Activate(self):
|
||||
self.bar.show()
|
||||
self.textbox.lineEdit().selectAll()
|
||||
self.textbox.setFocus()
|
||||
|
||||
def Deactivate(self):
|
||||
@ -449,8 +465,9 @@ class FindBar():
|
||||
|
||||
class CallGraphLevelItemBase(object):
|
||||
|
||||
def __init__(self, glb, row, parent_item):
|
||||
def __init__(self, glb, params, row, parent_item):
|
||||
self.glb = glb
|
||||
self.params = params
|
||||
self.row = row
|
||||
self.parent_item = parent_item
|
||||
self.query_done = False;
|
||||
@ -489,18 +506,24 @@ class CallGraphLevelItemBase(object):
|
||||
|
||||
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item):
|
||||
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
|
||||
def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
|
||||
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
|
||||
self.comm_id = comm_id
|
||||
self.thread_id = thread_id
|
||||
self.call_path_id = call_path_id
|
||||
self.insn_cnt = insn_cnt
|
||||
self.cyc_cnt = cyc_cnt
|
||||
self.branch_count = branch_count
|
||||
self.time = time
|
||||
|
||||
def Select(self):
|
||||
self.query_done = True;
|
||||
query = QSqlQuery(self.glb.db)
|
||||
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)"
|
||||
if self.params.have_ipc:
|
||||
ipc_str = ", SUM(insn_count), SUM(cyc_count)"
|
||||
else:
|
||||
ipc_str = ""
|
||||
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)"
|
||||
" FROM calls"
|
||||
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
|
||||
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
|
||||
@ -511,7 +534,15 @@ class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
" GROUP BY call_path_id, name, short_name"
|
||||
" ORDER BY call_path_id")
|
||||
while query.next():
|
||||
child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
|
||||
if self.params.have_ipc:
|
||||
insn_cnt = int(query.value(5))
|
||||
cyc_cnt = int(query.value(6))
|
||||
branch_count = int(query.value(7))
|
||||
else:
|
||||
insn_cnt = 0
|
||||
cyc_cnt = 0
|
||||
branch_count = int(query.value(5))
|
||||
child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
|
||||
self.child_items.append(child_item)
|
||||
self.child_count += 1
|
||||
|
||||
@ -519,37 +550,57 @@ class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
|
||||
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item):
|
||||
super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item)
|
||||
def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
|
||||
super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
|
||||
dso = dsoname(dso)
|
||||
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
|
||||
if self.params.have_ipc:
|
||||
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
|
||||
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
|
||||
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
|
||||
ipc = CalcIPC(cyc_cnt, insn_cnt)
|
||||
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
|
||||
else:
|
||||
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
|
||||
self.dbid = call_path_id
|
||||
|
||||
# Context-sensitive call graph data model level two item
|
||||
|
||||
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
|
||||
super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item)
|
||||
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
|
||||
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
|
||||
super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item)
|
||||
if self.params.have_ipc:
|
||||
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
|
||||
else:
|
||||
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
|
||||
self.dbid = thread_id
|
||||
|
||||
def Select(self):
|
||||
super(CallGraphLevelTwoItem, self).Select()
|
||||
for child_item in self.child_items:
|
||||
self.time += child_item.time
|
||||
self.insn_cnt += child_item.insn_cnt
|
||||
self.cyc_cnt += child_item.cyc_cnt
|
||||
self.branch_count += child_item.branch_count
|
||||
for child_item in self.child_items:
|
||||
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
|
||||
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
|
||||
if self.params.have_ipc:
|
||||
child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
|
||||
child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
|
||||
child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
|
||||
else:
|
||||
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
|
||||
|
||||
# Context-sensitive call graph data model level one item
|
||||
|
||||
class CallGraphLevelOneItem(CallGraphLevelItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, comm, parent_item):
|
||||
super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item)
|
||||
self.data = [comm, "", "", "", "", "", ""]
|
||||
def __init__(self, glb, params, row, comm_id, comm, parent_item):
|
||||
super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item)
|
||||
if self.params.have_ipc:
|
||||
self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
|
||||
else:
|
||||
self.data = [comm, "", "", "", "", "", ""]
|
||||
self.dbid = comm_id
|
||||
|
||||
def Select(self):
|
||||
@ -560,7 +611,7 @@ class CallGraphLevelOneItem(CallGraphLevelItemBase):
|
||||
" INNER JOIN threads ON thread_id = threads.id"
|
||||
" WHERE comm_id = " + str(self.dbid))
|
||||
while query.next():
|
||||
child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
|
||||
child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
|
||||
self.child_items.append(child_item)
|
||||
self.child_count += 1
|
||||
|
||||
@ -568,8 +619,8 @@ class CallGraphLevelOneItem(CallGraphLevelItemBase):
|
||||
|
||||
class CallGraphRootItem(CallGraphLevelItemBase):
|
||||
|
||||
def __init__(self, glb):
|
||||
super(CallGraphRootItem, self).__init__(glb, 0, None)
|
||||
def __init__(self, glb, params):
|
||||
super(CallGraphRootItem, self).__init__(glb, params, 0, None)
|
||||
self.dbid = 0
|
||||
self.query_done = True;
|
||||
query = QSqlQuery(glb.db)
|
||||
@ -577,16 +628,23 @@ class CallGraphRootItem(CallGraphLevelItemBase):
|
||||
while query.next():
|
||||
if not query.value(0):
|
||||
continue
|
||||
child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
|
||||
child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
|
||||
self.child_items.append(child_item)
|
||||
self.child_count += 1
|
||||
|
||||
# Call graph model parameters
|
||||
|
||||
class CallGraphModelParams():
|
||||
|
||||
def __init__(self, glb, parent=None):
|
||||
self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count")
|
||||
|
||||
# Context-sensitive call graph data model base
|
||||
|
||||
class CallGraphModelBase(TreeModel):
|
||||
|
||||
def __init__(self, glb, parent=None):
|
||||
super(CallGraphModelBase, self).__init__(glb, parent)
|
||||
super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent)
|
||||
|
||||
def FindSelect(self, value, pattern, query):
|
||||
if pattern:
|
||||
@ -668,17 +726,26 @@ class CallGraphModel(CallGraphModelBase):
|
||||
super(CallGraphModel, self).__init__(glb, parent)
|
||||
|
||||
def GetRoot(self):
|
||||
return CallGraphRootItem(self.glb)
|
||||
return CallGraphRootItem(self.glb, self.params)
|
||||
|
||||
def columnCount(self, parent=None):
|
||||
return 7
|
||||
if self.params.have_ipc:
|
||||
return 12
|
||||
else:
|
||||
return 7
|
||||
|
||||
def columnHeader(self, column):
|
||||
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
|
||||
if self.params.have_ipc:
|
||||
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
|
||||
else:
|
||||
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
|
||||
return headers[column]
|
||||
|
||||
def columnAlignment(self, column):
|
||||
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
|
||||
if self.params.have_ipc:
|
||||
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
|
||||
else:
|
||||
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
|
||||
return alignment[column]
|
||||
|
||||
def DoFindSelect(self, query, match):
|
||||
@ -715,11 +782,13 @@ class CallGraphModel(CallGraphModelBase):
|
||||
|
||||
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
|
||||
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
|
||||
def __init__(self, glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
|
||||
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
|
||||
self.comm_id = comm_id
|
||||
self.thread_id = thread_id
|
||||
self.calls_id = calls_id
|
||||
self.insn_cnt = insn_cnt
|
||||
self.cyc_cnt = cyc_cnt
|
||||
self.branch_count = branch_count
|
||||
self.time = time
|
||||
|
||||
@ -729,8 +798,12 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
|
||||
else:
|
||||
comm_thread = ""
|
||||
if self.params.have_ipc:
|
||||
ipc_str = ", insn_count, cyc_count"
|
||||
else:
|
||||
ipc_str = ""
|
||||
query = QSqlQuery(self.glb.db)
|
||||
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
|
||||
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count"
|
||||
" FROM calls"
|
||||
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
|
||||
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
|
||||
@ -738,7 +811,15 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
|
||||
" ORDER BY call_time, calls.id")
|
||||
while query.next():
|
||||
child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
|
||||
if self.params.have_ipc:
|
||||
insn_cnt = int(query.value(5))
|
||||
cyc_cnt = int(query.value(6))
|
||||
branch_count = int(query.value(7))
|
||||
else:
|
||||
insn_cnt = 0
|
||||
cyc_cnt = 0
|
||||
branch_count = int(query.value(5))
|
||||
child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
|
||||
self.child_items.append(child_item)
|
||||
self.child_count += 1
|
||||
|
||||
@ -746,37 +827,57 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
|
||||
|
||||
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
|
||||
super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
|
||||
def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
|
||||
super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
|
||||
dso = dsoname(dso)
|
||||
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
|
||||
if self.params.have_ipc:
|
||||
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
|
||||
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
|
||||
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
|
||||
ipc = CalcIPC(cyc_cnt, insn_cnt)
|
||||
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
|
||||
else:
|
||||
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
|
||||
self.dbid = calls_id
|
||||
|
||||
# Call tree data model level two item
|
||||
|
||||
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
|
||||
super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
|
||||
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
|
||||
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
|
||||
super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, parent_item)
|
||||
if self.params.have_ipc:
|
||||
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
|
||||
else:
|
||||
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
|
||||
self.dbid = thread_id
|
||||
|
||||
def Select(self):
|
||||
super(CallTreeLevelTwoItem, self).Select()
|
||||
for child_item in self.child_items:
|
||||
self.time += child_item.time
|
||||
self.insn_cnt += child_item.insn_cnt
|
||||
self.cyc_cnt += child_item.cyc_cnt
|
||||
self.branch_count += child_item.branch_count
|
||||
for child_item in self.child_items:
|
||||
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
|
||||
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
|
||||
if self.params.have_ipc:
|
||||
child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
|
||||
child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
|
||||
child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
|
||||
else:
|
||||
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
|
||||
|
||||
# Call tree data model level one item
|
||||
|
||||
class CallTreeLevelOneItem(CallGraphLevelItemBase):
|
||||
|
||||
def __init__(self, glb, row, comm_id, comm, parent_item):
|
||||
super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
|
||||
self.data = [comm, "", "", "", "", "", ""]
|
||||
def __init__(self, glb, params, row, comm_id, comm, parent_item):
|
||||
super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item)
|
||||
if self.params.have_ipc:
|
||||
self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
|
||||
else:
|
||||
self.data = [comm, "", "", "", "", "", ""]
|
||||
self.dbid = comm_id
|
||||
|
||||
def Select(self):
|
||||
@ -787,7 +888,7 @@ class CallTreeLevelOneItem(CallGraphLevelItemBase):
|
||||
" INNER JOIN threads ON thread_id = threads.id"
|
||||
" WHERE comm_id = " + str(self.dbid))
|
||||
while query.next():
|
||||
child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
|
||||
child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
|
||||
self.child_items.append(child_item)
|
||||
self.child_count += 1
|
||||
|
||||
@ -795,8 +896,8 @@ class CallTreeLevelOneItem(CallGraphLevelItemBase):
|
||||
|
||||
class CallTreeRootItem(CallGraphLevelItemBase):
|
||||
|
||||
def __init__(self, glb):
|
||||
super(CallTreeRootItem, self).__init__(glb, 0, None)
|
||||
def __init__(self, glb, params):
|
||||
super(CallTreeRootItem, self).__init__(glb, params, 0, None)
|
||||
self.dbid = 0
|
||||
self.query_done = True;
|
||||
query = QSqlQuery(glb.db)
|
||||
@ -804,7 +905,7 @@ class CallTreeRootItem(CallGraphLevelItemBase):
|
||||
while query.next():
|
||||
if not query.value(0):
|
||||
continue
|
||||
child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
|
||||
child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
|
||||
self.child_items.append(child_item)
|
||||
self.child_count += 1
|
||||
|
||||
@ -816,17 +917,26 @@ class CallTreeModel(CallGraphModelBase):
|
||||
super(CallTreeModel, self).__init__(glb, parent)
|
||||
|
||||
def GetRoot(self):
|
||||
return CallTreeRootItem(self.glb)
|
||||
return CallTreeRootItem(self.glb, self.params)
|
||||
|
||||
def columnCount(self, parent=None):
|
||||
return 7
|
||||
if self.params.have_ipc:
|
||||
return 12
|
||||
else:
|
||||
return 7
|
||||
|
||||
def columnHeader(self, column):
|
||||
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
|
||||
if self.params.have_ipc:
|
||||
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
|
||||
else:
|
||||
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
|
||||
return headers[column]
|
||||
|
||||
def columnAlignment(self, column):
|
||||
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
|
||||
if self.params.have_ipc:
|
||||
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
|
||||
else:
|
||||
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
|
||||
return alignment[column]
|
||||
|
||||
def DoFindSelect(self, query, match):
|
||||
@ -1355,11 +1465,11 @@ class FetchMoreRecordsBar():
|
||||
|
||||
class BranchLevelTwoItem():
|
||||
|
||||
def __init__(self, row, text, parent_item):
|
||||
def __init__(self, row, col, text, parent_item):
|
||||
self.row = row
|
||||
self.parent_item = parent_item
|
||||
self.data = [""] * 8
|
||||
self.data[7] = text
|
||||
self.data = [""] * (col + 1)
|
||||
self.data[col] = text
|
||||
self.level = 2
|
||||
|
||||
def getParentItem(self):
|
||||
@ -1391,6 +1501,7 @@ class BranchLevelOneItem():
|
||||
self.dbid = data[0]
|
||||
self.level = 1
|
||||
self.query_done = False
|
||||
self.br_col = len(self.data) - 1
|
||||
|
||||
def getChildItem(self, row):
|
||||
return self.child_items[row]
|
||||
@ -1471,7 +1582,7 @@ class BranchLevelOneItem():
|
||||
while k < 15:
|
||||
byte_str += " "
|
||||
k += 1
|
||||
self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self))
|
||||
self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
|
||||
self.child_count += 1
|
||||
else:
|
||||
return
|
||||
@ -1522,16 +1633,37 @@ class BranchRootItem():
|
||||
def getData(self, column):
|
||||
return ""
|
||||
|
||||
# Calculate instructions per cycle

def CalcIPC(cyc_cnt, insn_cnt):
	if cyc_cnt and insn_cnt:
		ipc = Decimal(float(insn_cnt) / cyc_cnt)
		ipc = str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
	else:
		ipc = "0"
	return ipc
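
A quick illustration of the rounding behaviour, with hypothetical inputs rather than values from the commit:

# Assumes CalcIPC() above and the script's 'from decimal import *'.
print(CalcIPC(4, 10))   # -> "2.50"
print(CalcIPC(0, 10))   # -> "0" (cycle count unknown)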
|
||||
|
||||
# Branch data preparation
|
||||
|
||||
def BranchDataPrepBr(query, data):
|
||||
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
|
||||
" (" + dsoname(query.value(11)) + ")" + " -> " +
|
||||
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
|
||||
" (" + dsoname(query.value(15)) + ")")
|
||||
|
||||
def BranchDataPrepIPC(query, data):
|
||||
insn_cnt = query.value(16)
|
||||
cyc_cnt = query.value(17)
|
||||
ipc = CalcIPC(cyc_cnt, insn_cnt)
|
||||
data.append(insn_cnt)
|
||||
data.append(cyc_cnt)
|
||||
data.append(ipc)
|
||||
|
||||
def BranchDataPrep(query):
|
||||
data = []
|
||||
for i in xrange(0, 8):
|
||||
data.append(query.value(i))
|
||||
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
|
||||
" (" + dsoname(query.value(11)) + ")" + " -> " +
|
||||
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
|
||||
" (" + dsoname(query.value(15)) + ")")
|
||||
BranchDataPrepBr(query, data)
|
||||
return data
|
||||
|
||||
def BranchDataPrepWA(query):
|
||||
@ -1541,10 +1673,26 @@ def BranchDataPrepWA(query):
|
||||
data.append("{:>19}".format(query.value(1)))
|
||||
for i in xrange(2, 8):
|
||||
data.append(query.value(i))
|
||||
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
|
||||
" (" + dsoname(query.value(11)) + ")" + " -> " +
|
||||
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
|
||||
" (" + dsoname(query.value(15)) + ")")
|
||||
BranchDataPrepBr(query, data)
|
||||
return data
|
||||
|
||||
def BranchDataWithIPCPrep(query):
|
||||
data = []
|
||||
for i in xrange(0, 8):
|
||||
data.append(query.value(i))
|
||||
BranchDataPrepIPC(query, data)
|
||||
BranchDataPrepBr(query, data)
|
||||
return data
|
||||
|
||||
def BranchDataWithIPCPrepWA(query):
|
||||
data = []
|
||||
data.append(query.value(0))
|
||||
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
|
||||
data.append("{:>19}".format(query.value(1)))
|
||||
for i in xrange(2, 8):
|
||||
data.append(query.value(i))
|
||||
BranchDataPrepIPC(query, data)
|
||||
BranchDataPrepBr(query, data)
|
||||
return data
|
||||
|
||||
# Branch data model
|
||||
@ -1554,14 +1702,24 @@ class BranchModel(TreeModel):
|
||||
progress = Signal(object)
|
||||
|
||||
def __init__(self, glb, event_id, where_clause, parent=None):
|
||||
super(BranchModel, self).__init__(glb, parent)
|
||||
super(BranchModel, self).__init__(glb, None, parent)
|
||||
self.event_id = event_id
|
||||
self.more = True
|
||||
self.populated = 0
|
||||
self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
|
||||
if self.have_ipc:
|
||||
select_ipc = ", insn_count, cyc_count"
|
||||
prep_fn = BranchDataWithIPCPrep
|
||||
prep_wa_fn = BranchDataWithIPCPrepWA
|
||||
else:
|
||||
select_ipc = ""
|
||||
prep_fn = BranchDataPrep
|
||||
prep_wa_fn = BranchDataPrepWA
|
||||
sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
|
||||
" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
|
||||
" ip, symbols.name, sym_offset, dsos.short_name,"
|
||||
" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
|
||||
+ select_ipc +
|
||||
" FROM samples"
|
||||
" INNER JOIN comms ON comm_id = comms.id"
|
||||
" INNER JOIN threads ON thread_id = threads.id"
|
||||
@ -1575,9 +1733,9 @@ class BranchModel(TreeModel):
|
||||
" ORDER BY samples.id"
|
||||
" LIMIT " + str(glb_chunk_sz))
|
||||
if pyside_version_1 and sys.version_info[0] == 3:
|
||||
prep = BranchDataPrepWA
|
||||
prep = prep_fn
|
||||
else:
|
||||
prep = BranchDataPrep
|
||||
prep = prep_wa_fn
|
||||
self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
|
||||
self.fetcher.done.connect(self.Update)
|
||||
self.fetcher.Fetch(glb_chunk_sz)
|
||||
@ -1586,13 +1744,23 @@ class BranchModel(TreeModel):
|
||||
return BranchRootItem()
|
||||
|
||||
def columnCount(self, parent=None):
|
||||
return 8
|
||||
if self.have_ipc:
|
||||
return 11
|
||||
else:
|
||||
return 8
|
||||
|
||||
def columnHeader(self, column):
|
||||
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
|
||||
if self.have_ipc:
|
||||
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
|
||||
else:
|
||||
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
|
||||
|
||||
def columnFont(self, column):
|
||||
if column != 7:
|
||||
if self.have_ipc:
|
||||
br_col = 10
|
||||
else:
|
||||
br_col = 7
|
||||
if column != br_col:
|
||||
return None
|
||||
return QFont("Monospace")
|
||||
|
||||
@ -2100,10 +2268,10 @@ def GetEventList(db):
|
||||
|
||||
# Is a table selectable
|
||||
|
||||
def IsSelectable(db, table, sql = ""):
|
||||
def IsSelectable(db, table, sql = "", columns = "*"):
|
||||
query = QSqlQuery(db)
|
||||
try:
|
||||
QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
|
||||
QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
|
||||
except:
|
||||
return False
|
||||
return True
|
||||
@ -2754,7 +2922,7 @@ class WindowMenu():
|
||||
action = self.window_menu.addAction(label)
|
||||
action.setCheckable(True)
|
||||
action.setChecked(sub_window == self.mdi_area.activeSubWindow())
|
||||
action.triggered.connect(lambda x=nr: self.setActiveSubWindow(x))
|
||||
action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x))
|
||||
self.window_menu.addAction(action)
|
||||
nr += 1
|
||||
|
||||
@ -2840,6 +3008,12 @@ cd xed
|
||||
sudo ./mfile.py --prefix=/usr/local install
|
||||
sudo ldconfig
|
||||
</pre>
|
||||
<h3>Instructions per Cycle (IPC)</h3>
If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
since the previous displayed 'IPC'.
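
As a rough sketch of how these columns can be read outside the viewer (the database name 'example.db' and a 'calls' export are assumptions; the CASE expression mirrors the IPC expression used in the calls_view created by export-to-sqlite.py):

import sqlite3

# Per-call IPC from a database written by export-to-sqlite.py with the 'calls' option.
con = sqlite3.connect("example.db")
rows = con.execute(
    "SELECT symbols.name, insn_count, cyc_count,"
    " CASE WHEN cyc_count = 0 THEN 0.0"
    " ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS ipc"
    " FROM calls"
    " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
    " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
    " LIMIT 10")
for name, insn_cnt, cyc_cnt, ipc in rows:
    print(name, insn_cnt, cyc_cnt, ipc)
con.close()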
|
||||
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
|
||||
@ -3114,14 +3288,14 @@ class MainWindow(QMainWindow):
|
||||
event = event.split(":")[0]
|
||||
if event == "branches":
|
||||
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
|
||||
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
|
||||
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self))
|
||||
label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
|
||||
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
|
||||
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self))
|
||||
|
||||
def TableMenu(self, tables, menu):
|
||||
table_menu = menu.addMenu("&Tables")
|
||||
for table in tables:
|
||||
table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda t=table: self.NewTableView(t), self))
|
||||
table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self))
|
||||
|
||||
def NewCallGraph(self):
|
||||
CallGraphWindow(self.glb, self)
|
||||
@ -3361,18 +3535,27 @@ class DBRef():
|
||||
# Main
|
||||
|
||||
def Main():
|
||||
if (len(sys.argv) < 2):
|
||||
printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
|
||||
raise Exception("Too few arguments")
|
||||
usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \
|
||||
" or: exported-sql-viewer.py --help-only"
|
||||
ap = argparse.ArgumentParser(usage = usage_str, add_help = False)
|
||||
ap.add_argument("--pyside-version-1", action='store_true')
|
||||
ap.add_argument("dbname", nargs="?")
|
||||
ap.add_argument("--help-only", action='store_true')
|
||||
args = ap.parse_args()
|
||||
|
||||
dbname = sys.argv[1]
|
||||
if dbname == "--help-only":
|
||||
if args.help_only:
|
||||
app = QApplication(sys.argv)
|
||||
mainwindow = HelpOnlyWindow()
|
||||
mainwindow.show()
|
||||
err = app.exec_()
|
||||
sys.exit(err)
|
||||
|
||||
dbname = args.dbname
|
||||
if dbname is None:
|
||||
ap.print_usage()
|
||||
print("Too few arguments")
|
||||
sys.exit(1)
|
||||
|
||||
is_sqlite3 = False
|
||||
try:
|
||||
f = open(dbname, "rb")
|
||||
|
@ -1,3 +1,5 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
perf-y += builtin-test.o
|
||||
perf-y += parse-events.o
|
||||
perf-y += dso-data.o
|
||||
@ -50,6 +52,8 @@ perf-y += perf-hooks.o
|
||||
perf-y += clang.o
|
||||
perf-y += unit_number__scnprintf.o
|
||||
perf-y += mem2node.o
|
||||
perf-y += map_groups.o
|
||||
perf-y += time-utils-test.o
|
||||
|
||||
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
|
||||
$(call rule_mkdir)
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
|
||||
* 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* bpf-script-example.c
|
||||
* Test basic LLVM building
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* bpf-script-test-kbuild.c
|
||||
* Test include from kernel header
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* bpf-script-test-prologue.c
|
||||
* Test BPF prologue
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* bpf-script-test-relocation.c
|
||||
* Test BPF loader checking relocation
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/epoll.h>
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "string2.h"
|
||||
#include "symbol.h"
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <subcmd/exec-cmd.h>
|
||||
|
||||
static bool dont_fork;
|
||||
@ -289,6 +290,14 @@ static struct test generic_tests[] = {
|
||||
.desc = "mem2node",
|
||||
.func = test__mem2node,
|
||||
},
|
||||
{
|
||||
.desc = "time utils",
|
||||
.func = test__time_utils,
|
||||
},
|
||||
{
|
||||
.desc = "map_groups__merge_in",
|
||||
.func = test__map_groups__merge_in,
|
||||
},
|
||||
{
|
||||
.func = NULL,
|
||||
},
|
||||
@ -430,7 +439,7 @@ static const char *shell_test__description(char *description, size_t size,
|
||||
description = fgets(description, size, fp);
|
||||
fclose(fp);
|
||||
|
||||
return description ? trim(description + 1) : NULL;
|
||||
return description ? strim(description + 1) : NULL;
|
||||
}
|
||||
|
||||
#define for_each_shell_test(dir, base, ent) \
|
||||
|
@ -22,7 +22,7 @@
|
||||
|
||||
#include "tests.h"
|
||||
|
||||
#include "sane_ctype.h"
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#define BUFSZ 1024
|
||||
#define READLEN 128
|
||||
|
tools/perf/tests/map_groups.c (new file, 121 lines)
@ -0,0 +1,121 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kernel.h>
|
||||
#include "tests.h"
|
||||
#include "map.h"
|
||||
#include "map_groups.h"
|
||||
#include "dso.h"
|
||||
#include "debug.h"
|
||||
|
||||
struct map_def {
|
||||
const char *name;
|
||||
u64 start;
|
||||
u64 end;
|
||||
};
|
||||
|
||||
static int check_maps(struct map_def *merged, unsigned int size, struct map_groups *mg)
|
||||
{
|
||||
struct map *map;
|
||||
unsigned int i = 0;
|
||||
|
||||
map = map_groups__first(mg);
|
||||
while (map) {
|
||||
TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start);
|
||||
TEST_ASSERT_VAL("wrong map end", map->end == merged[i].end);
|
||||
TEST_ASSERT_VAL("wrong map name", !strcmp(map->dso->name, merged[i].name));
|
||||
TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 2);
|
||||
|
||||
i++;
|
||||
map = map_groups__next(map);
|
||||
|
||||
TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
|
||||
}
|
||||
|
||||
return TEST_OK;
|
||||
}
|
||||
|
||||
int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
struct map_groups mg;
|
||||
unsigned int i;
|
||||
struct map_def bpf_progs[] = {
|
||||
{ "bpf_prog_1", 200, 300 },
|
||||
{ "bpf_prog_2", 500, 600 },
|
||||
{ "bpf_prog_3", 800, 900 },
|
||||
};
|
||||
struct map_def merged12[] = {
|
||||
{ "kcore1", 100, 200 },
|
||||
{ "bpf_prog_1", 200, 300 },
|
||||
{ "kcore1", 300, 500 },
|
||||
{ "bpf_prog_2", 500, 600 },
|
||||
{ "kcore1", 600, 800 },
|
||||
{ "bpf_prog_3", 800, 900 },
|
||||
{ "kcore1", 900, 1000 },
|
||||
};
|
||||
struct map_def merged3[] = {
|
||||
{ "kcore1", 100, 200 },
|
||||
{ "bpf_prog_1", 200, 300 },
|
||||
{ "kcore1", 300, 500 },
|
||||
{ "bpf_prog_2", 500, 600 },
|
||||
{ "kcore1", 600, 800 },
|
||||
{ "bpf_prog_3", 800, 900 },
|
||||
{ "kcore1", 900, 1000 },
|
||||
{ "kcore3", 1000, 1100 },
|
||||
};
|
||||
struct map *map_kcore1, *map_kcore2, *map_kcore3;
|
||||
int ret;
|
||||
|
||||
map_groups__init(&mg, NULL);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(bpf_progs); i++) {
|
||||
struct map *map;
|
||||
|
||||
map = dso__new_map(bpf_progs[i].name);
|
||||
TEST_ASSERT_VAL("failed to create map", map);
|
||||
|
||||
map->start = bpf_progs[i].start;
|
||||
map->end = bpf_progs[i].end;
|
||||
map_groups__insert(&mg, map);
|
||||
map__put(map);
|
||||
}
|
||||
|
||||
map_kcore1 = dso__new_map("kcore1");
|
||||
TEST_ASSERT_VAL("failed to create map", map_kcore1);
|
||||
|
||||
map_kcore2 = dso__new_map("kcore2");
|
||||
TEST_ASSERT_VAL("failed to create map", map_kcore2);
|
||||
|
||||
map_kcore3 = dso__new_map("kcore3");
|
||||
TEST_ASSERT_VAL("failed to create map", map_kcore3);
|
||||
|
||||
/* kcore1 map overlaps over all bpf maps */
|
||||
map_kcore1->start = 100;
|
||||
map_kcore1->end = 1000;
|
||||
|
||||
/* kcore2 map hides behind bpf_prog_2 */
|
||||
map_kcore2->start = 550;
|
||||
map_kcore2->end = 570;
|
||||
|
||||
/* kcore3 map hides behind bpf_prog_3, kcore1 and adds new map */
|
||||
map_kcore3->start = 880;
|
||||
map_kcore3->end = 1100;
|
||||
|
||||
ret = map_groups__merge_in(&mg, map_kcore1);
|
||||
TEST_ASSERT_VAL("failed to merge map", !ret);
|
||||
|
||||
ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg);
|
||||
TEST_ASSERT_VAL("merge check failed", !ret);
|
||||
|
||||
ret = map_groups__merge_in(&mg, map_kcore2);
|
||||
TEST_ASSERT_VAL("failed to merge map", !ret);
|
||||
|
||||
ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg);
|
||||
TEST_ASSERT_VAL("merge check failed", !ret);
|
||||
|
||||
ret = map_groups__merge_in(&mg, map_kcore3);
|
||||
TEST_ASSERT_VAL("failed to merge map", !ret);
|
||||
|
||||
ret = check_maps(merged3, ARRAY_SIZE(merged3), &mg);
|
||||
TEST_ASSERT_VAL("merge check failed", !ret);
|
||||
return TEST_OK;
|
||||
}
|
Some files were not shown because too many files have changed in this diff.