Merge tag 'perf-core-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:
 "Kernel side changes:

   - Add AMD Fam17h RAPL support

   - Introduce CAP_PERFMON to kernel and user space

   - Add Zhaoxin CPU support

   - Misc fixes and cleanups

  Tooling changes:

   - perf record:

     Introduce '--switch-output-event' to specify arbitrary events to
     be set up and read from a side-band thread; when they take place,
     a signal is sent to the main 'perf record' thread, reusing the
     core of '--switch-output' to take perf.data snapshots from the
     ring buffer used for '--overwrite', e.g.:

	# perf record --overwrite -e sched:* \
		      --switch-output-event syscalls:*connect* \
		      workload

     will take perf.data.YYYYMMDDHHMMSS snapshots up to around the
     connect syscalls.

     Add the '--num-synthesize-threads' option to control the degree
     of parallelism of the synthesize_mmap() code, which scans
     /proc/PID/task/PID/maps and can be time consuming. This mimics
     pre-existing behaviour in 'perf top'.
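
     A minimal usage sketch; note the option appears to be spelled
     '--num-thread-synthesize' on the 'perf record' command line,
     mirroring the pre-existing 'perf top' option, so treat the exact
     spelling as an assumption and check 'perf record --help':

	# perf record --num-thread-synthesize 4 -a -- sleep 1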

   - perf bench:

     Add a multi-threaded synthesize benchmark and kallsyms parsing
     benchmark.
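
     For example (benchmark and collection names are assumptions based
     on this series; verify with 'perf bench internals --help'):

	# perf bench internals synthesize
	# perf bench internals kallsyms-parse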

   - Intel PT support:

     Stitch LBR records from multiple samples to get deeper
     backtraces; there are caveats, see the csets for details.

     Allow using Intel PT to synthesize callchains for regular events.

     Add support for synthesizing branch stacks for regular events
     (cycles, instructions, etc) from Intel PT data.
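
     A hedged sketch of the latter two features: record Intel PT as an
     AUX sample alongside a regular event, then let the tools
     synthesize call chains and branch stacks for it via the new
     --itrace 'G' and 'L' options (see the itrace documentation change
     at the end of this diff); the exact syntax is an assumption, see
     the Intel PT documentation:

	# perf record --aux-sample -e '{intel_pt//,cycles}:u' -- workload
	# perf report --itrace=Gl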

  Misc changes:

   - Updated perf vendor events for power9 and Coresight.

   - Add flamegraph.py script via 'perf flamegraph'

   - Misc other changes, fixes and cleanups - see the Git log for details

  Also, since over the last couple of years perf tooling has matured and
  decoupled from the kernel perf changes to a large degree, going
  forward Arnaldo is going to send perf tooling changes via direct pull
  requests"

* tag 'perf-core-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (163 commits)
  perf/x86/rapl: Add AMD Fam17h RAPL support
  perf/x86/rapl: Make perf_probe_msr() more robust and flexible
  perf/x86/rapl: Flip logic on default events visibility
  perf/x86/rapl: Refactor to share the RAPL code between Intel and AMD CPUs
  perf/x86/rapl: Move RAPL support to common x86 code
  perf/core: Replace zero-length array with flexible-array
  perf/x86: Replace zero-length array with flexible-array
  perf/x86/intel: Add more available bits for OFFCORE_RESPONSE of Intel Tremont
  perf/x86/rapl: Add Ice Lake RAPL support
  perf flamegraph: Use /bin/bash for report and record scripts
  perf cs-etm: Move definition of 'traceid_list' global variable from header file
  libsymbols kallsyms: Move hex2u64 out of header
  libsymbols kallsyms: Parse using io api
  perf bench: Add kallsyms parsing
  perf: cs-etm: Update to build with latest opencsd version.
  perf symbol: Fix kernel symbol address display
  perf inject: Rename perf_evsel__*() operating on 'struct evsel *' to evsel__*()
  perf annotate: Rename perf_evsel__*() operating on 'struct evsel *' to evsel__*()
  perf trace: Rename perf_evsel__*() operating on 'struct evsel *' to evsel__*()
  perf script: Rename perf_evsel__*() operating on 'struct evsel *' to evsel__*()
  ...
Linus Torvalds 2020-06-01 13:23:59 -07:00
commit a7092c8204
194 changed files with 5234 additions and 2006 deletions

View File

@ -1,6 +1,6 @@
.. _perf_security:
Perf Events and tool security
Perf events and tool security
=============================
Overview
@ -42,11 +42,11 @@ categories:
Data that belong to the fourth category can potentially contain
sensitive process data. If PMUs in some monitoring modes capture values
of execution context registers or data from process memory then access
to such monitoring capabilities requires to be ordered and secured
properly. So, perf_events/Perf performance monitoring is the subject for
security access control management [5]_ .
to such monitoring modes requires to be ordered and secured properly.
So, perf_events performance monitoring and observability operations are
the subject for security access control management [5]_ .
perf_events/Perf access control
perf_events access control
-------------------------------
To perform security checks, the Linux implementation splits processes
@ -66,11 +66,25 @@ into distinct units, known as capabilities [6]_ , which can be
independently enabled and disabled on per-thread basis for processes and
files of unprivileged users.
Unprivileged processes with enabled CAP_SYS_ADMIN capability are treated
Unprivileged processes with enabled CAP_PERFMON capability are treated
as privileged processes with respect to perf_events performance
monitoring and bypass *scope* permissions checks in the kernel.
monitoring and observability operations, thus, bypass *scope* permissions
checks in the kernel. CAP_PERFMON implements the principle of least
privilege [13]_ (POSIX 1003.1e: 2.2.2.39) for performance monitoring and
observability operations in the kernel and provides a secure approach to
perfomance monitoring and observability in the system.
Unprivileged processes using perf_events system call API is also subject
For backward compatibility reasons the access to perf_events monitoring and
observability operations is also open for CAP_SYS_ADMIN privileged
processes but CAP_SYS_ADMIN usage for secure monitoring and observability
use cases is discouraged with respect to the CAP_PERFMON capability.
If system audit records [14]_ for a process using perf_events system call
API contain denial records of acquiring both CAP_PERFMON and CAP_SYS_ADMIN
capabilities then providing the process with CAP_PERFMON capability singly
is recommended as the preferred secure approach to resolve double access
denial logging related to usage of performance monitoring and observability.
Unprivileged processes using perf_events system call are also subject
for PTRACE_MODE_READ_REALCREDS ptrace access mode check [7]_ , whose
outcome determines whether monitoring is permitted. So unprivileged
processes provided with CAP_SYS_PTRACE capability are effectively
@ -82,14 +96,14 @@ performance analysis of monitored processes or a system. For example,
CAP_SYSLOG capability permits reading kernel space memory addresses from
/proc/kallsyms file.
perf_events/Perf privileged users
Privileged Perf users groups
---------------------------------
Mechanisms of capabilities, privileged capability-dumb files [6]_ and
file system ACLs [10]_ can be used to create a dedicated group of
perf_events/Perf privileged users who are permitted to execute
performance monitoring without scope limits. The following steps can be
taken to create such a group of privileged Perf users.
file system ACLs [10]_ can be used to create dedicated groups of
privileged Perf users who are permitted to execute performance monitoring
and observability without scope limits. The following steps can be
taken to create such groups of privileged Perf users.
1. Create perf_users group of privileged Perf users, assign perf_users
group to Perf tool executable and limit access to the executable for
@ -108,30 +122,51 @@ taken to create such a group of privileged Perf users.
-rwxr-x--- 2 root perf_users 11M Oct 19 15:12 perf
2. Assign the required capabilities to the Perf tool executable file and
enable members of perf_users group with performance monitoring
enable members of perf_users group with monitoring and observability
privileges [6]_ :
::
# setcap "cap_sys_admin,cap_sys_ptrace,cap_syslog=ep" perf
# setcap -v "cap_sys_admin,cap_sys_ptrace,cap_syslog=ep" perf
# setcap "cap_perfmon,cap_sys_ptrace,cap_syslog=ep" perf
# setcap -v "cap_perfmon,cap_sys_ptrace,cap_syslog=ep" perf
perf: OK
# getcap perf
perf = cap_sys_ptrace,cap_sys_admin,cap_syslog+ep
perf = cap_sys_ptrace,cap_syslog,cap_perfmon+ep
If the libcap installed doesn't yet support "cap_perfmon", use "38" instead,
i.e.:
::
# setcap "38,cap_ipc_lock,cap_sys_ptrace,cap_syslog=ep" perf
Note that you may need to have 'cap_ipc_lock' in the mix for tools such as
'perf top', alternatively use 'perf top -m N', to reduce the memory that
it uses for the perf ring buffer, see the memory allocation section below.
Using a libcap without support for CAP_PERFMON will make cap_get_flag(caps, 38,
CAP_EFFECTIVE, &val) fail, which will lead the default event to be 'cycles:u',
so as a workaround explicitly ask for the 'cycles' event, i.e.:
::
# perf top -e cycles
To get kernel and user samples with a perf binary with just CAP_PERFMON.
As a result, members of perf_users group are capable of conducting
performance monitoring by using functionality of the configured Perf
tool executable that, when executes, passes perf_events subsystem scope
checks.
performance monitoring and observability by using functionality of the
configured Perf tool executable that, when executes, passes perf_events
subsystem scope checks.
This specific access control management is only available to superuser
or root running processes with CAP_SETPCAP, CAP_SETFCAP [6]_
capabilities.
perf_events/Perf unprivileged users
Unprivileged users
-----------------------------------
perf_events/Perf *scope* and *access* control for unprivileged processes
perf_events *scope* and *access* control for unprivileged processes
is governed by perf_event_paranoid [2]_ setting:
-1:
@ -166,7 +201,7 @@ is governed by perf_event_paranoid [2]_ setting:
perf_event_mlock_kb locking limit is imposed but ignored for
unprivileged processes with CAP_IPC_LOCK capability.
perf_events/Perf resource control
Resource control
---------------------------------
Open file descriptors
@ -227,4 +262,5 @@ Bibliography
.. [10] `<http://man7.org/linux/man-pages/man5/acl.5.html>`_
.. [11] `<http://man7.org/linux/man-pages/man2/getrlimit.2.html>`_
.. [12] `<http://man7.org/linux/man-pages/man5/limits.conf.5.html>`_
.. [13] `<https://sites.google.com/site/fullycapable>`_
.. [14] `<http://man7.org/linux/man-pages/man8/auditd.8.html>`_

View File

@ -721,7 +721,13 @@ perf_event_paranoid
===================
Controls use of the performance events system by unprivileged
users (without CAP_SYS_ADMIN). The default value is 2.
users (without CAP_PERFMON). The default value is 2.
For backward compatibility reasons access to system performance
monitoring and observability remains open for CAP_SYS_ADMIN
privileged processes but CAP_SYS_ADMIN usage for secure system
performance monitoring and observability operations is discouraged
with respect to CAP_PERFMON use cases.
=== ==================================================================
-1 Allow use of (almost) all events by all users.
@ -730,13 +736,13 @@ users (without CAP_SYS_ADMIN). The default value is 2.
``CAP_IPC_LOCK``.
>=0 Disallow ftrace function tracepoint by users without
``CAP_SYS_ADMIN``.
``CAP_PERFMON``.
Disallow raw tracepoint access by users without ``CAP_SYS_ADMIN``.
Disallow raw tracepoint access by users without ``CAP_PERFMON``.
>=1 Disallow CPU event access by users without ``CAP_SYS_ADMIN``.
>=1 Disallow CPU event access by users without ``CAP_PERFMON``.
>=2 Disallow kernel profiling by users without ``CAP_SYS_ADMIN``.
>=2 Disallow kernel profiling by users without ``CAP_PERFMON``.
=== ==================================================================

View File

@ -300,7 +300,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf,
else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EACCES;
if (count != sizeof(uint32_t))

View File

@ -976,7 +976,7 @@ static int thread_imc_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EACCES;
/* Sampling not supported */
@ -1412,7 +1412,7 @@ static int trace_imc_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EACCES;
/* Return if this is a couting event */

View File

@ -10,11 +10,11 @@ config PERF_EVENTS_INTEL_UNCORE
available on NehalemEX and more modern processors.
config PERF_EVENTS_INTEL_RAPL
tristate "Intel rapl performance events"
depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
tristate "Intel/AMD rapl performance events"
depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI
default y
---help---
Include support for Intel rapl performance events for power
Include support for Intel and AMD rapl performance events for power
monitoring on modern processors.
config PERF_EVENTS_INTEL_CSTATE

View File

@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += core.o probe.o
obj-$(PERF_EVENTS_INTEL_RAPL) += rapl.o
obj-y += amd/
obj-$(CONFIG_X86_LOCAL_APIC) += msr.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel/
obj-$(CONFIG_CPU_SUP_CENTAUR) += zhaoxin/
obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zhaoxin/

View File

@ -1839,6 +1839,10 @@ static int __init init_hw_perf_events(void)
err = amd_pmu_init();
x86_pmu.name = "HYGON";
break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
err = zhaoxin_pmu_init();
break;
default:
err = -ENOTSUPP;
}

View File

@ -2,8 +2,6 @@
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
intel-rapl-perf-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o

View File

@ -58,7 +58,7 @@ struct bts_buffer {
local_t head;
unsigned long end;
void **data_pages;
struct bts_phys buf[0];
struct bts_phys buf[];
};
static struct pmu bts_pmu;

View File

@ -1892,8 +1892,8 @@ static __initconst const u64 tnt_hw_cache_extra_regs
static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
EVENT_EXTRA_END
};

View File

@ -226,8 +226,6 @@ static int __init pt_pmu_hw_init(void)
pt_pmu.vmx = true;
}
attrs = NULL;
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
&pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],

View File

@ -130,7 +130,7 @@ struct intel_uncore_box {
struct list_head list;
struct list_head active_list;
void __iomem *io_addr;
struct intel_uncore_extra_reg shared_regs[0];
struct intel_uncore_extra_reg shared_regs[];
};
/* CFL uncore 8th cbox MSRs */

View File

@ -618,6 +618,7 @@ struct x86_pmu {
/* PMI handler bits */
unsigned int late_ack :1,
enabled_ack :1,
counter_freezing :1;
/*
* sysfs attrs
@ -1133,3 +1134,12 @@ static inline int is_ht_workaround_enabled(void)
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
int zhaoxin_pmu_init(void);
#else
static inline int zhaoxin_pmu_init(void)
{
return 0;
}
#endif /*CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN*/

View File

@ -10,6 +10,11 @@ not_visible(struct kobject *kobj, struct attribute *attr, int i)
return 0;
}
/*
* Accepts msr[] array with non populated entries as long as either
* msr[i].msr is 0 or msr[i].grp is NULL. Note that the default sysfs
* visibility is visible when group->is_visible callback is set.
*/
unsigned long
perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
{
@ -24,8 +29,16 @@ perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
if (!msr[bit].no_check) {
struct attribute_group *grp = msr[bit].grp;
/* skip entry with no group */
if (!grp)
continue;
grp->is_visible = not_visible;
/* skip unpopulated entry */
if (!msr[bit].msr)
continue;
if (msr[bit].test && !msr[bit].test(bit, data))
continue;
/* Virt sucks; you cannot tell if a R/O MSR is present :/ */

View File

@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support Intel RAPL energy consumption counters
* Support Intel/AMD RAPL energy consumption counters
* Copyright (C) 2013 Google, Inc., Stephane Eranian
*
* Intel RAPL interface is specified in the IA-32 Manual Vol3b
* section 14.7.1 (September 2013)
*
* AMD RAPL interface for Fam17h is described in the public PPR:
* https://bugzilla.kernel.org/show_bug.cgi?id=206537
*
* RAPL provides more controls than just reporting energy consumption
* however here we only expose the 3 energy consumption free running
* counters (pp0, pkg, dram).
@ -58,8 +61,8 @@
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"
#include "perf_event.h"
#include "probe.h"
MODULE_LICENSE("GPL");
@ -128,7 +131,9 @@ struct rapl_pmus {
};
struct rapl_model {
struct perf_msr *rapl_msrs;
unsigned long events;
unsigned int msr_power_unit;
bool apply_quirk;
};
@ -138,7 +143,7 @@ static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static struct perf_msr rapl_msrs[];
static struct perf_msr *rapl_msrs;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
@ -455,9 +460,16 @@ static struct attribute *rapl_events_cores[] = {
NULL,
};
static umode_t
rapl_not_visible(struct kobject *kobj, struct attribute *attr, int i)
{
return 0;
}
static struct attribute_group rapl_events_cores_group = {
.name = "events",
.attrs = rapl_events_cores,
.is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_pkg[] = {
@ -470,6 +482,7 @@ static struct attribute *rapl_events_pkg[] = {
static struct attribute_group rapl_events_pkg_group = {
.name = "events",
.attrs = rapl_events_pkg,
.is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_ram[] = {
@ -482,6 +495,7 @@ static struct attribute *rapl_events_ram[] = {
static struct attribute_group rapl_events_ram_group = {
.name = "events",
.attrs = rapl_events_ram,
.is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_gpu[] = {
@ -494,6 +508,7 @@ static struct attribute *rapl_events_gpu[] = {
static struct attribute_group rapl_events_gpu_group = {
.name = "events",
.attrs = rapl_events_gpu,
.is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_psys[] = {
@ -506,6 +521,7 @@ static struct attribute *rapl_events_psys[] = {
static struct attribute_group rapl_events_psys_group = {
.name = "events",
.attrs = rapl_events_psys,
.is_visible = rapl_not_visible,
};
static bool test_msr(int idx, void *data)
@ -513,7 +529,7 @@ static bool test_msr(int idx, void *data)
return test_bit(idx, (unsigned long *) data);
}
static struct perf_msr rapl_msrs[] = {
static struct perf_msr intel_rapl_msrs[] = {
[PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr },
[PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
[PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr },
@ -521,6 +537,16 @@ static struct perf_msr rapl_msrs[] = {
[PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr },
};
/*
* Force to PERF_RAPL_MAX size due to:
* - perf_msr_probe(PERF_RAPL_MAX)
* - want to use same event codes across both architectures
*/
static struct perf_msr amd_rapl_msrs[PERF_RAPL_MAX] = {
[PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
};
static int rapl_cpu_offline(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
@ -575,13 +601,13 @@ static int rapl_cpu_online(unsigned int cpu)
return 0;
}
static int rapl_check_hw_unit(bool apply_quirk)
static int rapl_check_hw_unit(struct rapl_model *rm)
{
u64 msr_rapl_power_unit_bits;
int i;
/* protect rdmsrl() to handle virtualization */
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
if (rdmsrl_safe(rm->msr_power_unit, &msr_rapl_power_unit_bits))
return -1;
for (i = 0; i < NR_RAPL_DOMAINS; i++)
rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
@ -592,7 +618,7 @@ static int rapl_check_hw_unit(bool apply_quirk)
* "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
* of 2. Datasheet, September 2014, Reference Number: 330784-001 "
*/
if (apply_quirk)
if (rm->apply_quirk)
rapl_hw_unit[PERF_RAPL_RAM] = 16;
/*
@ -673,6 +699,8 @@ static struct rapl_model model_snb = {
BIT(PERF_RAPL_PKG) |
BIT(PERF_RAPL_PP1),
.apply_quirk = false,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
.rapl_msrs = intel_rapl_msrs,
};
static struct rapl_model model_snbep = {
@ -680,6 +708,8 @@ static struct rapl_model model_snbep = {
BIT(PERF_RAPL_PKG) |
BIT(PERF_RAPL_RAM),
.apply_quirk = false,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
.rapl_msrs = intel_rapl_msrs,
};
static struct rapl_model model_hsw = {
@ -688,6 +718,8 @@ static struct rapl_model model_hsw = {
BIT(PERF_RAPL_RAM) |
BIT(PERF_RAPL_PP1),
.apply_quirk = false,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
.rapl_msrs = intel_rapl_msrs,
};
static struct rapl_model model_hsx = {
@ -695,12 +727,16 @@ static struct rapl_model model_hsx = {
BIT(PERF_RAPL_PKG) |
BIT(PERF_RAPL_RAM),
.apply_quirk = true,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
.rapl_msrs = intel_rapl_msrs,
};
static struct rapl_model model_knl = {
.events = BIT(PERF_RAPL_PKG) |
BIT(PERF_RAPL_RAM),
.apply_quirk = true,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
.rapl_msrs = intel_rapl_msrs,
};
static struct rapl_model model_skl = {
@ -710,6 +746,15 @@ static struct rapl_model model_skl = {
BIT(PERF_RAPL_PP1) |
BIT(PERF_RAPL_PSYS),
.apply_quirk = false,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
.rapl_msrs = intel_rapl_msrs,
};
static struct rapl_model model_amd_fam17h = {
.events = BIT(PERF_RAPL_PKG),
.apply_quirk = false,
.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
.rapl_msrs = amd_rapl_msrs,
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
@ -738,8 +783,11 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &model_hsw),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &model_hsx),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
@ -755,10 +803,13 @@ static int __init rapl_pmu_init(void)
return -ENODEV;
rm = (struct rapl_model *) id->driver_data;
rapl_msrs = rm->rapl_msrs;
rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX,
false, (void *) &rm->events);
ret = rapl_check_hw_unit(rm->apply_quirk);
ret = rapl_check_hw_unit(rm);
if (ret)
return ret;

View File

@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += core.o

View File

@ -0,0 +1,613 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Zhoaxin PMU; like Intel Architectural PerfMon-v2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
#include "../perf_event.h"
/*
* Zhaoxin PerfMon, used on zxc and later.
*/
static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = {
[PERF_COUNT_HW_CPU_CYCLES] = 0x0082,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515,
[PERF_COUNT_HW_CACHE_MISSES] = 0x051a,
[PERF_COUNT_HW_BUS_CYCLES] = 0x0083,
};
static struct event_constraint zxc_event_constraints[] __read_mostly = {
FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */
EVENT_CONSTRAINT_END
};
static struct event_constraint zxd_event_constraints[] __read_mostly = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */
FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */
FIXED_EVENT_CONSTRAINT(0x0083, 2), /* unhalted bus clock cycles */
EVENT_CONSTRAINT_END
};
static __initconst const u64 zxd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0042,
[C(RESULT_MISS)] = 0x0538,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x0043,
[C(RESULT_MISS)] = 0x0562,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0300,
[C(RESULT_MISS)] = 0x0301,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x030a,
[C(RESULT_MISS)] = 0x030b,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0042,
[C(RESULT_MISS)] = 0x052c,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x0043,
[C(RESULT_MISS)] = 0x0530,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0564,
[C(RESULT_MISS)] = 0x0565,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x00c0,
[C(RESULT_MISS)] = 0x0534,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0700,
[C(RESULT_MISS)] = 0x0709,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
static __initconst const u64 zxe_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0568,
[C(RESULT_MISS)] = 0x054b,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x0669,
[C(RESULT_MISS)] = 0x0562,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0300,
[C(RESULT_MISS)] = 0x0301,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x030a,
[C(RESULT_MISS)] = 0x030b,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0568,
[C(RESULT_MISS)] = 0x052c,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x0669,
[C(RESULT_MISS)] = 0x0530,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0564,
[C(RESULT_MISS)] = 0x0565,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x00c0,
[C(RESULT_MISS)] = 0x0534,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0028,
[C(RESULT_MISS)] = 0x0029,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
static void zhaoxin_pmu_disable_all(void)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
static void zhaoxin_pmu_enable_all(int added)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}
static inline u64 zhaoxin_pmu_get_status(void)
{
u64 status;
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
return status;
}
static inline void zhaoxin_pmu_ack_status(u64 ack)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void zxc_pmu_ack_status(u64 ack)
{
/*
* ZXC needs global control enabled in order to clear status bits.
*/
zhaoxin_pmu_enable_all(0);
zhaoxin_pmu_ack_status(ack);
zhaoxin_pmu_disable_all();
}
static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, mask;
mask = 0xfULL << (idx * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
wrmsrl(hwc->config_base, ctrl_val);
}
static void zhaoxin_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
zhaoxin_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
}
static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
/*
* Enable IRQ generation (0x8),
* and enable ring-3 counting (0x2) and ring-0 counting (0x1)
* if requested:
*/
bits = 0x8ULL;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
bits |= 0x2;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
bits |= 0x1;
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
wrmsrl(hwc->config_base, ctrl_val);
}
static void zhaoxin_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
zhaoxin_pmu_enable_fixed(hwc);
return;
}
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
static int zhaoxin_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int handled = 0;
u64 status;
int bit;
cpuc = this_cpu_ptr(&cpu_hw_events);
apic_write(APIC_LVTPC, APIC_DM_NMI);
zhaoxin_pmu_disable_all();
status = zhaoxin_pmu_get_status();
if (!status)
goto done;
again:
if (x86_pmu.enabled_ack)
zxc_pmu_ack_status(status);
else
zhaoxin_pmu_ack_status(status);
inc_irq_stat(apic_perf_irqs);
/*
* CondChgd bit 63 doesn't mean any overflow status. Ignore
* and clear the bit.
*/
if (__test_and_clear_bit(63, (unsigned long *)&status)) {
if (!status)
goto done;
}
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
handled++;
if (!test_bit(bit, cpuc->active_mask))
continue;
x86_perf_event_update(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
/*
* Repeat if there is more work to be done:
*/
status = zhaoxin_pmu_get_status();
if (status)
goto again;
done:
zhaoxin_pmu_enable_all(0);
return handled;
}
static u64 zhaoxin_pmu_event_map(int hw_event)
{
return zx_pmon_event_map[hw_event];
}
static struct event_constraint *
zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
struct event_constraint *c;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &unconstrained;
}
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(umask, "config:8-15");
PMU_FORMAT_ATTR(edge, "config:18");
PMU_FORMAT_ATTR(inv, "config:23");
PMU_FORMAT_ATTR(cmask, "config:24-31");
static struct attribute *zx_arch_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
&format_attr_edge.attr,
&format_attr_inv.attr,
&format_attr_cmask.attr,
NULL,
};
static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config)
{
u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
return x86_event_sysfs_show(page, config, event);
}
static const struct x86_pmu zhaoxin_pmu __initconst = {
.name = "zhaoxin",
.handle_irq = zhaoxin_pmu_handle_irq,
.disable_all = zhaoxin_pmu_disable_all,
.enable_all = zhaoxin_pmu_enable_all,
.enable = zhaoxin_pmu_enable_event,
.disable = zhaoxin_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = zhaoxin_pmu_event_map,
.max_events = ARRAY_SIZE(zx_pmon_event_map),
.apic = 1,
/*
* For zxd/zxe, read/write operation for PMCx MSR is 48 bits.
*/
.max_period = (1ULL << 47) - 1,
.get_event_constraints = zhaoxin_get_event_constraints,
.format_attrs = zx_arch_formats_attr,
.events_sysfs_show = zhaoxin_event_sysfs_show,
};
static const struct { int id; char *name; } zx_arch_events_map[] __initconst = {
{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void zhaoxin_arch_events_quirk(void)
{
int bit;
/* disable event that reported as not presend by cpuid */
for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {
zx_pmon_event_map[zx_arch_events_map[bit].id] = 0;
pr_warn("CPUID marked event: \'%s\' unavailable\n",
zx_arch_events_map[bit].name);
}
}
__init int zhaoxin_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
int version;
pr_info("Welcome to zhaoxin pmu!\n");
/*
* Check whether the Architectural PerfMon supports
* hw_event or not.
*/
cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT - 1)
return -ENODEV;
version = eax.split.version_id;
if (version != 2)
return -ENODEV;
x86_pmu = zhaoxin_pmu;
pr_info("Version check pass!\n");
x86_pmu.version = version;
x86_pmu.num_counters = eax.split.num_counters;
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
x86_pmu.events_maskl = ebx.full;
x86_pmu.events_mask_len = eax.split.mask_length;
x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
x86_add_quirk(zhaoxin_arch_events_quirk);
switch (boot_cpu_data.x86) {
case 0x06:
if (boot_cpu_data.x86_model == 0x0f || boot_cpu_data.x86_model == 0x19) {
x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
/* Clearing status works only if the global control is enable on zxc. */
x86_pmu.enabled_ack = 1;
x86_pmu.event_constraints = zxc_event_constraints;
zx_pmon_event_map[PERF_COUNT_HW_INSTRUCTIONS] = 0;
zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0;
zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0;
zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0;
pr_cont("ZXC events, ");
break;
}
return -ENODEV;
case 0x07:
zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event = 0x01, .umask = 0x01, .inv = 0x01, .cmask = 0x01);
zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event = 0x0f, .umask = 0x04, .inv = 0, .cmask = 0);
switch (boot_cpu_data.x86_model) {
case 0x1b:
memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
x86_pmu.event_constraints = zxd_event_constraints;
zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700;
zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709;
pr_cont("ZXD events, ");
break;
case 0x3b:
memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
x86_pmu.event_constraints = zxd_event_constraints;
zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028;
zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029;
pr_cont("ZXE events, ");
break;
default:
return -ENODEV;
}
break;
default:
return -ENODEV;
}
x86_pmu.intel_ctrl = (1 << (x86_pmu.num_counters)) - 1;
x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
}
}
return 0;
}

View File

@ -301,6 +301,9 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
#define MSR_AMD_PKG_ENERGY_STATUS 0xc001029b
#define MSR_AMD_RAPL_POWER_UNIT 0xc0010299
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649

View File

@ -63,6 +63,10 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BPU_PERFCTR0;
}
fallthrough;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_PERFCTR0;
}
return 0;
}
@ -92,6 +96,10 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BSU_ESCR0;
}
fallthrough;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_EVENTSEL0;
}
return 0;

View File

@ -3388,10 +3388,10 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
* without CAP_SYS_ADMIN privileges.
* without CAP_PERFMON or CAP_SYS_ADMIN privileges.
*/
if (privileged_op &&
i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
@ -3584,9 +3584,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
} else
oa_freq_hz = 0;
if (oa_freq_hz > i915_oa_max_sample_rate &&
!capable(CAP_SYS_ADMIN)) {
DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
i915_oa_max_sample_rate);
return -EACCES;
}
@ -4007,7 +4006,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
return -EACCES;
}
@ -4154,7 +4153,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -ENOTSUPP;
}
if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
return -EACCES;
}

View File

@ -113,7 +113,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
{
int err = -EPERM;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EPERM;
if (test_and_set_bit_lock(0, &buffer_opened))

View File

@ -274,7 +274,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
if (!attr->exclude_kernel)
reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && capable(CAP_SYS_ADMIN))
if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
return reg;
@ -700,7 +700,7 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
reg = arm_spe_event_to_pmscr(event);
if (!capable(CAP_SYS_ADMIN) &&
if (!perfmon_capable() &&
(reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
BIT(SYS_PMSCR_EL1_CX_SHIFT) |
BIT(SYS_PMSCR_EL1_PCT_SHIFT))))

View File

@ -251,6 +251,10 @@ extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
static inline bool perfmon_capable(void)
{
return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
}
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);

View File

@ -61,7 +61,7 @@ struct perf_guest_info_callbacks {
struct perf_callchain_entry {
__u64 nr;
__u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
__u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
@ -113,7 +113,7 @@ struct perf_raw_record {
struct perf_branch_stack {
__u64 nr;
__u64 hw_idx;
struct perf_branch_entry entries[0];
struct perf_branch_entry entries[];
};
struct task_struct;
@ -1305,7 +1305,7 @@ static inline int perf_is_paranoid(void)
static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
return -EACCES;
return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
@ -1313,7 +1313,7 @@ static inline int perf_allow_kernel(struct perf_event_attr *attr)
static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -EACCES;
return security_perf_event_open(attr, PERF_SECURITY_CPU);
@ -1321,7 +1321,7 @@ static inline int perf_allow_cpu(struct perf_event_attr *attr)
static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -EPERM;
return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);

View File

@ -367,8 +367,14 @@ struct vfs_ns_cap_data {
#define CAP_AUDIT_READ 37
/*
* Allow system performance and observability privileged operations
* using perf_events, i915_perf and other kernel subsystems
*/
#define CAP_LAST_CAP CAP_AUDIT_READ
#define CAP_PERFMON 38
#define CAP_LAST_CAP CAP_PERFMON
#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)

View File

@ -16,7 +16,7 @@
struct callchain_cpus_entries {
struct rcu_head rcu_head;
struct perf_callchain_entry *cpu_entries[0];
struct perf_callchain_entry *cpu_entries[];
};
int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;

View File

@ -95,11 +95,11 @@ static void remote_function(void *data)
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This might
* be on the current CPU, which just calls the function directly
* be on the current CPU, which just calls the function directly. This will
* retry due to any failures in smp_call_function_single(), such as if the
* task_cpu() goes offline concurrently.
*
* returns: @func return value, or
* -ESRCH - when the process isn't running
* -EAGAIN - when the process moved away
* returns @func return value or -ESRCH when the process isn't running
*/
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
@ -112,11 +112,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
};
int ret;
do {
ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
if (!ret)
ret = data.ret;
} while (ret == -EAGAIN);
for (;;) {
ret = smp_call_function_single(task_cpu(p), remote_function,
&data, 1);
ret = !ret ? data.ret : -EAGAIN;
if (ret != -EAGAIN)
break;
cond_resched();
}
return ret;
}
@ -9404,7 +9409,7 @@ static int perf_kprobe_event_init(struct perf_event *event)
if (event->attr.type != perf_kprobe.type)
return -ENOENT;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EACCES;
/*
@ -9464,7 +9469,7 @@ static int perf_uprobe_event_init(struct perf_event *event)
if (event->attr.type != perf_uprobe.type)
return -ENOENT;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EACCES;
/*
@ -11511,7 +11516,7 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (attr.namespaces) {
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EACCES;
}

View File

@ -55,7 +55,7 @@ struct perf_buffer {
void *aux_priv;
struct perf_event_mmap_page *user_page;
void *data_pages[0];
void *data_pages[];
};
extern void rb_free(struct perf_buffer *rb);

View File

@ -1500,7 +1500,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
u32 *ids, prog_cnt, ids_len;
int ret;
if (!capable(CAP_SYS_ADMIN))
if (!perfmon_capable())
return -EPERM;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -EINVAL;

View File

@ -27,9 +27,9 @@
"audit_control", "setfcap"
#define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \
"wake_alarm", "block_suspend", "audit_read"
"wake_alarm", "block_suspend", "audit_read", "perfmon"
#if CAP_LAST_CAP > CAP_AUDIT_READ
#if CAP_LAST_CAP > CAP_PERFMON
#error New capability defined, please update COMMON_CAP2_PERMS.
#endif

View File

@ -98,7 +98,8 @@ FEATURE_TESTS_EXTRA := \
llvm \
llvm-version \
clang \
libbpf
libbpf \
libpfm4
FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)

View File

@ -69,7 +69,8 @@ FILES= \
test-libaio.bin \
test-libzstd.bin \
test-clang-bpf-global-var.bin \
test-file-handle.bin
test-file-handle.bin \
test-libpfm4.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@ -331,6 +332,9 @@ $(OUTPUT)test-clang-bpf-global-var.bin:
$(OUTPUT)test-file-handle.bin:
$(BUILD)
$(OUTPUT)test-libpfm4.bin:
$(BUILD) -lpfm
###############################
clean:

View File

@ -4,9 +4,9 @@
/*
* Check OpenCSD library version is sufficient to provide required features
*/
#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
#define OCSD_MIN_VER ((0 << 16) | (14 << 8) | (0))
#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
#error "OpenCSD >= 0.11.0 is required"
#error "OpenCSD >= 0.14.0 is required"
#endif
int main(void)

View File

@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include <perfmon/pfmlib.h>
int main(void)
{
pfm_initialize();
return 0;
}

View File

@ -90,6 +90,7 @@ struct fs {
const char * const *mounts;
char path[PATH_MAX];
bool found;
bool checked;
long magic;
};
@ -111,31 +112,37 @@ static struct fs fs__entries[] = {
.name = "sysfs",
.mounts = sysfs__fs_known_mountpoints,
.magic = SYSFS_MAGIC,
.checked = false,
},
[FS__PROCFS] = {
.name = "proc",
.mounts = procfs__known_mountpoints,
.magic = PROC_SUPER_MAGIC,
.checked = false,
},
[FS__DEBUGFS] = {
.name = "debugfs",
.mounts = debugfs__known_mountpoints,
.magic = DEBUGFS_MAGIC,
.checked = false,
},
[FS__TRACEFS] = {
.name = "tracefs",
.mounts = tracefs__known_mountpoints,
.magic = TRACEFS_MAGIC,
.checked = false,
},
[FS__HUGETLBFS] = {
.name = "hugetlbfs",
.mounts = hugetlbfs__known_mountpoints,
.magic = HUGETLBFS_MAGIC,
.checked = false,
},
[FS__BPF_FS] = {
.name = "bpf",
.mounts = bpf_fs__known_mountpoints,
.magic = BPF_FS_MAGIC,
.checked = false,
},
};
@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs)
}
fclose(fp);
fs->checked = true;
return fs->found = found;
}
@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs)
return false;
fs->found = true;
fs->checked = true;
strncpy(fs->path, override_path, sizeof(fs->path) - 1);
fs->path[sizeof(fs->path) - 1] = '\0';
return true;
@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx)
if (fs->found)
return (const char *)fs->path;
/* the mount point was already checked for the mount point
* but and did not exist, so return NULL to avoid scanning again.
* This makes the found and not found paths cost equivalent
* in case of multiple calls.
*/
if (fs->checked)
return NULL;
return fs__get_mountpoint(fs);
}

View File

@ -18,6 +18,18 @@
const char *name##__mount(void); \
bool name##__configured(void); \
/*
* The xxxx__mountpoint() entry points find the first match mount point for each
* filesystems listed below, where xxxx is the filesystem type.
*
* The interface is as follows:
*
* - If a mount point is found on first call, it is cached and used for all
* subsequent calls.
*
* - If a mount point is not found, NULL is returned on first call and all
* subsequent calls.
*/
FS(sysfs)
FS(procfs)
FS(debugfs)

tools/lib/api/io.h (new file)
View File

@ -0,0 +1,115 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Lightweight buffered reading library.
*
* Copyright 2019 Google LLC.
*/
#ifndef __API_IO__
#define __API_IO__
#include <stdlib.h>
#include <unistd.h>
struct io {
/* File descriptor being read/ */
int fd;
/* Size of the read buffer. */
unsigned int buf_len;
/* Pointer to storage for buffering read. */
char *buf;
/* End of the storage. */
char *end;
/* Currently accessed data pointer. */
char *data;
/* Set true on when the end of file on read error. */
bool eof;
};
static inline void io__init(struct io *io, int fd,
char *buf, unsigned int buf_len)
{
io->fd = fd;
io->buf_len = buf_len;
io->buf = buf;
io->end = buf;
io->data = buf;
io->eof = false;
}
/* Reads one character from the "io" file with similar semantics to fgetc. */
static inline int io__get_char(struct io *io)
{
char *ptr = io->data;
if (io->eof)
return -1;
if (ptr == io->end) {
ssize_t n = read(io->fd, io->buf, io->buf_len);
if (n <= 0) {
io->eof = true;
return -1;
}
ptr = &io->buf[0];
io->end = &io->buf[n];
}
io->data = ptr + 1;
return *ptr;
}
/* Read a hexadecimal value with no 0x prefix into the out argument hex. If the
* first character isn't hexadecimal returns -2, io->eof returns -1, otherwise
* returns the character after the hexadecimal value which may be -1 for eof.
* If the read value is larger than a u64 the high-order bits will be dropped.
*/
static inline int io__get_hex(struct io *io, __u64 *hex)
{
bool first_read = true;
*hex = 0;
while (true) {
int ch = io__get_char(io);
if (ch < 0)
return ch;
if (ch >= '0' && ch <= '9')
*hex = (*hex << 4) | (ch - '0');
else if (ch >= 'a' && ch <= 'f')
*hex = (*hex << 4) | (ch - 'a' + 10);
else if (ch >= 'A' && ch <= 'F')
*hex = (*hex << 4) | (ch - 'A' + 10);
else if (first_read)
return -2;
else
return ch;
first_read = false;
}
}
/* Read a positive decimal value with out argument dec. If the first character
* isn't a decimal returns -2, io->eof returns -1, otherwise returns the
* character after the decimal value which may be -1 for eof. If the read value
* is larger than a u64 the high-order bits will be dropped.
*/
static inline int io__get_dec(struct io *io, __u64 *dec)
{
bool first_read = true;
*dec = 0;
while (true) {
int ch = io__get_char(io);
if (ch < 0)
return ch;
if (ch >= '0' && ch <= '9')
*dec = (*dec * 10) + ch - '0';
else if (first_read)
return -2;
else
return ch;
first_read = false;
}
}
#endif /* __API_IO__ */

View File

@ -247,7 +247,7 @@ out:
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
if (idx < cpus->nr)
if (cpus && idx < cpus->nr)
return cpus->map[idx];
return -1;

View File

@ -11,10 +11,8 @@
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
@ -125,8 +123,10 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
perf_cpu_map__put(evlist->cpus);
perf_cpu_map__put(evlist->all_cpus);
perf_thread_map__put(evlist->threads);
evlist->cpus = NULL;
evlist->all_cpus = NULL;
evlist->threads = NULL;
fdarray__exit(&evlist->pollfd);
}

View File

@ -151,6 +151,8 @@ struct option {
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
#define OPT_CALLBACK(s, l, v, a, h, f) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f) }
#define OPT_CALLBACK_SET(s, l, v, os, a, h, f) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .set = check_vtype(os, bool *)}
#define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \

View File

@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include "symbol/kallsyms.h"
#include "api/io.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <fcntl.h>
u8 kallsyms2elf_type(char type)
{
@ -15,74 +17,62 @@ bool kallsyms__is_function(char symbol_type)
return symbol_type == 'T' || symbol_type == 'W';
}
/*
* While we find nice hex chars, build a long_val.
* Return number of chars processed.
*/
int hex2u64(const char *ptr, u64 *long_val)
static void read_to_eol(struct io *io)
{
char *p;
int ch;
*long_val = strtoull(ptr, &p, 16);
return p - ptr;
for (;;) {
ch = io__get_char(io);
if (ch < 0 || ch == '\n')
return;
}
}
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start))
{
char *line = NULL;
size_t n;
int err = -1;
FILE *file = fopen(filename, "r");
struct io io;
char bf[BUFSIZ];
int err;
if (file == NULL)
goto out_failure;
io.fd = open(filename, O_RDONLY, 0);
if (io.fd < 0)
return -1;
io__init(&io, io.fd, bf, sizeof(bf));
err = 0;
while (!feof(file)) {
u64 start;
int line_len, len;
while (!io.eof) {
__u64 start;
int ch;
size_t i;
char symbol_type;
char *symbol_name;
char symbol_name[KSYM_NAME_LEN + 1];
line_len = getline(&line, &n, file);
if (line_len < 0 || !line)
break;
line[--line_len] = '\0'; /* \n */
len = hex2u64(line, &start);
/* Skip the line if we failed to parse the address. */
if (!len)
if (io__get_hex(&io, &start) != ' ') {
read_to_eol(&io);
continue;
len++;
if (len + 2 >= line_len)
continue;
symbol_type = line[len];
len += 2;
symbol_name = line + len;
len = line_len - len;
if (len >= KSYM_NAME_LEN) {
err = -1;
break;
}
symbol_type = io__get_char(&io);
if (io__get_char(&io) != ' ') {
read_to_eol(&io);
continue;
}
for (i = 0; i < sizeof(symbol_name); i++) {
ch = io__get_char(&io);
if (ch < 0 || ch == '\n')
break;
symbol_name[i] = ch;
}
symbol_name[i] = '\0';
err = process_symbol(arg, symbol_name, symbol_type, start);
if (err)
break;
}
free(line);
fclose(file);
close(io.fd);
return err;
out_failure:
return -1;
}

View File

@ -18,8 +18,6 @@ static inline u8 kallsyms2elf_binding(char type)
return isupper(type) ? STB_GLOBAL : STB_LOCAL;
}
int hex2u64(const char *ptr, u64 *long_val);
u8 kallsyms2elf_type(char type);
bool kallsyms__is_function(char symbol_type);


@ -438,7 +438,7 @@ void *kbuffer_translate_data(int swap, void *data, unsigned int *size)
case KBUFFER_TYPE_TIME_EXTEND:
case KBUFFER_TYPE_TIME_STAMP:
return NULL;
};
}
*size = length;


@ -1958,7 +1958,8 @@ static char *op_to_str(struct tep_event_filter *filter, struct tep_filter_arg *a
default:
break;
}
asprintf(&str, val ? "TRUE" : "FALSE");
if (asprintf(&str, val ? "TRUE" : "FALSE") < 0)
str = NULL;
break;
}
}
@ -1976,7 +1977,8 @@ static char *op_to_str(struct tep_event_filter *filter, struct tep_filter_arg *a
break;
}
asprintf(&str, "(%s) %s (%s)", left, op, right);
if (asprintf(&str, "(%s) %s (%s)", left, op, right) < 0)
str = NULL;
break;
case TEP_FILTER_OP_NOT:
@ -1992,10 +1994,12 @@ static char *op_to_str(struct tep_event_filter *filter, struct tep_filter_arg *a
right_val = 0;
if (right_val >= 0) {
/* just return the opposite */
asprintf(&str, right_val ? "FALSE" : "TRUE");
if (asprintf(&str, right_val ? "FALSE" : "TRUE") < 0)
str = NULL;
break;
}
asprintf(&str, "%s(%s)", op, right);
if (asprintf(&str, "%s(%s)", op, right) < 0)
str = NULL;
break;
default:
@ -2011,7 +2015,8 @@ static char *val_to_str(struct tep_event_filter *filter, struct tep_filter_arg *
{
char *str = NULL;
asprintf(&str, "%lld", arg->value.val);
if (asprintf(&str, "%lld", arg->value.val) < 0)
str = NULL;
return str;
}
@ -2069,7 +2074,8 @@ static char *exp_to_str(struct tep_event_filter *filter, struct tep_filter_arg *
break;
}
asprintf(&str, "%s %s %s", lstr, op, rstr);
if (asprintf(&str, "%s %s %s", lstr, op, rstr) < 0)
str = NULL;
out:
free(lstr);
free(rstr);
@ -2113,7 +2119,8 @@ static char *num_to_str(struct tep_event_filter *filter, struct tep_filter_arg *
if (!op)
op = "<=";
asprintf(&str, "%s %s %s", lstr, op, rstr);
if (asprintf(&str, "%s %s %s", lstr, op, rstr) < 0)
str = NULL;
break;
default:
@ -2148,8 +2155,9 @@ static char *str_to_str(struct tep_event_filter *filter, struct tep_filter_arg *
if (!op)
op = "!~";
asprintf(&str, "%s %s \"%s\"",
arg->str.field->name, op, arg->str.val);
if (asprintf(&str, "%s %s \"%s\"",
arg->str.field->name, op, arg->str.val) < 0)
str = NULL;
break;
default:
@ -2165,7 +2173,8 @@ static char *arg_to_str(struct tep_event_filter *filter, struct tep_filter_arg *
switch (arg->type) {
case TEP_FILTER_ARG_BOOLEAN:
asprintf(&str, arg->boolean.value ? "TRUE" : "FALSE");
if (asprintf(&str, arg->boolean.value ? "TRUE" : "FALSE") < 0)
str = NULL;
return str;
case TEP_FILTER_ARG_OP:
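
All of the filter.c hunks above apply the same fix: asprintf() can fail, returning -1 and leaving the output pointer undefined, so the return value is now checked and str is reset to NULL. A stand-alone sketch of that pattern (the helper name is made up for illustration):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Illustrative helper mirroring the checked-asprintf pattern above. */
static char *bool_to_str(int val)
{
	char *str = NULL;

	/* On failure asprintf() returns -1 and leaves str undefined. */
	if (asprintf(&str, "%s", val ? "TRUE" : "FALSE") < 0)
		str = NULL;

	return str;	/* caller frees; may be NULL on allocation failure */
}

int main(void)
{
	char *s = bool_to_str(1);

	printf("%s\n", s ? s : "(allocation failed)");
	free(s);
	return 0;
}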


@ -48,7 +48,7 @@ man5dir=$(mandir)/man5
man7dir=$(mandir)/man7
ASCIIDOC=asciidoc
ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
ASCIIDOC_EXTRA += --unsafe -f asciidoc.conf
ASCIIDOC_HTML = xhtml11
MANPAGE_XSL = manpage-normal.xsl
XMLTO_EXTRA =
@ -59,7 +59,7 @@ HTML_REF = origin/html
ifdef USE_ASCIIDOCTOR
ASCIIDOC = asciidoctor
ASCIIDOC_EXTRA = -a compat-mode
ASCIIDOC_EXTRA += -a compat-mode
ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
ASCIIDOC_EXTRA += -a mansource="perf" -a manmanual="perf Manual"
ASCIIDOC_HTML = xhtml5


@ -10,7 +10,9 @@
e synthesize error events
d create a debug log
g synthesize a call chain (use with i or x)
G synthesize a call chain on existing event records
l synthesize last branch entries (use with i or x)
L synthesize last branch entries on existing event records
s skip initial number of events
The default is all events i.e. the same as --itrace=ibxwpe,
@ -31,6 +33,10 @@
Also the number of last branch entries (default 64, max. 1024) for
instructions or transactions events can be specified.
Similar to options g and l, size may also be specified for options G and L.
On x86, note that G and L work poorly when data has been recorded with
large PEBS. Refer to the linkperf:perf-intel-pt[1] man page for details.
It is also possible to skip events generated (instructions, branches, transactions,
ptwrite, power) at the beginning. This is useful to ignore initialization code.


@ -61,6 +61,9 @@ SUBSYSTEM
'epoll'::
Eventpoll (epoll) stressing benchmarks.
'internals'::
Benchmark internal perf functionality.
'all'::
All benchmark subsystems.
@ -214,6 +217,11 @@ Suite for evaluating concurrent epoll_wait calls.
*ctl*::
Suite for evaluating multiple epoll_ctl calls.
SUITES FOR 'internals'
~~~~~~~~~~~~~~~~~~~~~~
*synthesize*::
Suite for evaluating perf's event synthesis performance.
SEE ALSO
--------
linkperf:perf[1]


@ -111,6 +111,17 @@ REPORT OPTIONS
--display::
Switch to HITM type (rmt, lcl) to display and sort on. Total HITMs as default.
--stitch-lbr::
Show callgraph with stitched LBRs, which may produce a more complete
callgraph. The perf.data file must have been obtained using
perf c2c record --call-graph lbr.
Disabled by default. In common cases with call stack overflows,
it can recreate better call stacks than the default lbr call stack
output, but this approach is not foolproof: there can be cases
where it creates incorrect call stacks from incorrect matches.
A known limitation is exception handling such as setjmp/longjmp,
whose calls and returns will not match.
C2C RECORD
----------
The perf c2c record command setup options related to HITM cacheline analysis


@ -69,22 +69,22 @@ And profiled with 'perf report' e.g.
To also trace kernel space presents a problem, namely kernel self-modifying
code. A fairly good kernel image is available in /proc/kcore but to get an
accurate image a copy of /proc/kcore needs to be made under the same conditions
as the data capture. A script perf-with-kcore can do that, but beware that the
script makes use of 'sudo' to copy /proc/kcore. If you have perf installed
locally from the source tree you can do:
as the data capture. 'perf record' can make a copy of /proc/kcore if the option
--kcore is used, but access to /proc/kcore is restricted e.g.
~/libexec/perf-core/perf-with-kcore record pt_ls -e intel_pt// -- ls
sudo perf record -o pt_ls --kcore -e intel_pt// -- ls
which will create a directory named 'pt_ls' and put the perf.data file and
copies of /proc/kcore, /proc/kallsyms and /proc/modules into it. Then to use
'perf report' becomes:
which will create a directory named 'pt_ls' and put the perf.data file (named
simply 'data') and copies of /proc/kcore, /proc/kallsyms and /proc/modules into
it. The other tools understand the directory format, so to use 'perf report'
becomes:
~/libexec/perf-core/perf-with-kcore report pt_ls
sudo perf report -i pt_ls
Because samples are synthesized after-the-fact, the sampling period can be
selected for reporting. e.g. sample every microsecond
~/libexec/perf-core/perf-with-kcore report pt_ls --itrace=i1usge
sudo perf report pt_ls --itrace=i1usge
See the sections below for more information about the --itrace option.
@ -821,7 +821,9 @@ The letters are:
e synthesize tracing error events
d create a debug log
g synthesize a call chain (use with i or x)
G synthesize a call chain on existing event records
l synthesize last branch entries (use with i or x)
L synthesize last branch entries on existing event records
s skip initial number of events
"Instructions" events look like they were recorded by "perf record -e
@ -912,6 +914,39 @@ transactions events can be specified. e.g.
Note that last branch entries are cleared for each sample, so there is no overlap
from one sample to the next.
The G and L options are designed in particular for sample mode, and work much
like g and l but add call chain and branch stack to the other selected events
instead of synthesized events. For example, to record branch-misses events for
'ls' and then add a call chain derived from the Intel PT trace:
perf record --aux-sample -e '{intel_pt//u,branch-misses:u}' -- ls
perf report --itrace=Ge
Although in fact G is a default for perf report, so that is the same as just:
perf report
One caveat with the G and L options is that they work poorly with "Large PEBS".
Large PEBS means PEBS records will be accumulated by hardware and then written
into the event buffer in one go. That reduces interrupts, but can give very
late timestamps. Because the Intel PT trace is synchronized by timestamps,
the PEBS events do not match the trace. Currently, Large PEBS is used only in
certain circumstances:
- hardware supports it
- PEBS is used
- event period is specified, instead of frequency
- the sample type is limited to the following flags:
PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR |
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID |
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER |
PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR |
PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER |
PERF_SAMPLE_PERIOD (and sometimes) | PERF_SAMPLE_TIME
Because Intel PT sample mode uses a different sample type to the list above,
Large PEBS is not used with Intel PT sample mode. To avoid Large PEBS in other
cases, avoid specifying the event period i.e. avoid the 'perf record' -c option,
--count option, or 'period' config term.
To disable trace decoding entirely, use the option --no-itrace.
It is also possible to skip events generated (instructions, branches, transactions)


@ -115,6 +115,11 @@ raw encoding of 0x1A8 can be used:
perf stat -e r1a8 -a sleep 1
perf record -e r1a8 ...
It's also possible to use pmu syntax:
perf record -e r1a8 -a sleep 1
perf record -e cpu/r1a8/ ...
You should refer to the processor specific documentation for getting these
details. Some of them are referenced in the SEE ALSO section below.
@ -258,6 +263,9 @@ Normally all events in an event group sample, but with :S only
the first event (the leader) samples, and it only reads the values of the
other events in the group.
However, in the case AUX area events (e.g. Intel PT or CoreSight), the AUX
area event must be the leader, so then the second event samples, not the first.
OPTIONS
-------


@ -556,6 +556,19 @@ overhead. You can still switch them on with:
--switch-output --no-no-buildid --no-no-buildid-cache
--switch-output-event::
Events that will cause the switch of the perf.data file, auto-selecting
--switch-output=signal; the results are similar, as internally the side band
thread will also send a SIGUSR2 to the main one.
Uses the same syntax as --event; the event will just not be recorded, serving only to
switch the perf.data file as soon as the --switch-output event is processed by
a separate sideband thread.
This sideband thread is also used for other purposes, like processing the
PERF_RECORD_BPF_EVENT records as they happen, asking the kernel for extra BPF
information, etc.
--switch-max-files=N::
When rotating perf.data with --switch-output, only keep N files.
@ -596,6 +609,10 @@ Make a copy of /proc/kcore and place it into a directory with the perf data file
Limit the sample data max size, <size> is expected to be a number with
appended unit character - B/K/M/G
--num-thread-synthesize::
The number of threads to run when synthesizing events for existing processes.
By default, the number of threads equals 1.
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]


@ -488,6 +488,17 @@ include::itrace.txt[]
This option extends the perf report to show reference callgraphs,
which collected by reference event, in no callgraph event.
--stitch-lbr::
Show callgraph with stitched LBRs, which may produce a more complete
callgraph. The perf.data file must have been obtained using
perf record --call-graph lbr.
Disabled by default. In common cases with call stack overflows,
it can recreate better call stacks than the default lbr call stack
output, but this approach is not foolproof: there can be cases
where it creates incorrect call stacks from incorrect matches.
A known limitation is exception handling such as setjmp/longjmp,
whose calls and returns will not match.
--socket-filter::
Only report the samples on the processor socket that match with this filter


@ -440,6 +440,17 @@ include::itrace.txt[]
--show-on-off-events::
Show the --switch-on/off events too.
--stitch-lbr::
Show callgraph with stitched LBRs, which may produce a more complete
callgraph. The perf.data file must have been obtained using
perf record --call-graph lbr.
Disabled by default. In common cases with call stack overflows,
it can recreate better call stacks than the default lbr call stack
output, but this approach is not foolproof: there can be cases
where it creates incorrect call stacks from incorrect matches.
A known limitation is exception handling such as setjmp/longjmp,
whose calls and returns will not match.
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],


@ -176,6 +176,8 @@ Print count deltas every N milliseconds (minimum: 1ms)
The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution.
example: 'perf stat -I 1000 -e cycles -a sleep 5'
If the metric exists, it is calculated by the counts generated in this interval and the metric is printed after #.
--interval-count times::
Print count deltas for fixed number of times.
This option should be used together with "-I" option.


@ -319,6 +319,15 @@ Default is to monitor all CPUS.
go straight to the histogram browser, just like 'perf top' with no events
explicitly specified does.
--stitch-lbr::
Show callgraph with stitched LBRs, which may produce a more complete
callgraph. The option must be used with --call-graph lbr recording.
Disabled by default. In common cases with call stack overflows,
it can recreate better call stacks than the default lbr call stack
output, but this approach is not foolproof: there can be cases
where it creates incorrect call stacks from incorrect matches.
A known limitation is exception handling such as setjmp/longjmp,
whose calls and returns will not match.
INTERACTIVE PROMPTING KEYS
--------------------------


@ -373,6 +373,22 @@ struct {
Indicates that trace contains records of PERF_RECORD_COMPRESSED type
that have perf_events records in compressed form.
HEADER_CPU_PMU_CAPS = 28,
A list of cpu PMU capabilities. The format of data is as below.
struct {
u32 nr_cpu_pmu_caps;
{
char name[];
char value[];
} [nr_cpu_pmu_caps]
};
Example:
cpu pmu capabilities: branches=32, max_precise=3, pmu_name=icelake
other bits are reserved and should be ignored for now
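
A sketch of how such a feature section could be walked, assuming the name/value pairs are stored back to back as nul-terminated strings (that encoding detail, and the function name, are assumptions for illustration; the actual perf.data string encoding may carry a length prefix):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout: u32 count, then nul-terminated name/value string pairs. */
static void dump_cpu_pmu_caps(const char *buf, size_t size)
{
	uint32_t nr, i;
	size_t off = sizeof(nr);

	if (size < sizeof(nr))
		return;
	memcpy(&nr, buf, sizeof(nr));

	for (i = 0; i < nr && off < size; i++) {
		const char *name = buf + off;
		const char *value;

		off += strnlen(name, size - off) + 1;
		if (off >= size)
			break;
		value = buf + off;
		off += strnlen(value, size - off) + 1;
		printf("%s=%s\n", name, value);	/* e.g. branches=32 */
	}
}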
HEADER_FEAT_BITS = 256,


@ -188,7 +188,7 @@ AWK = awk
# non-config cases
config := 1
NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf
NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
@ -832,7 +832,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
# 'make doc' should call 'make -C Documentation all'
$(DOC_TARGETS):
$(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:doc=all)
$(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:doc=all) ASCIIDOC_EXTRA=$(ASCIIDOC_EXTRA)
TAG_FOLDERS= . ../lib ../include
TAG_FILES= ../../include/uapi/linux/perf_event.h
@ -959,7 +959,7 @@ install-python_ext:
# 'make install-doc' should call 'make -C Documentation install'
$(INSTALL_DOC_TARGETS):
$(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=)
$(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=) ASCIIDOC_EXTRA=$(ASCIIDOC_EXTRA)
### Cleaning rules


@ -23,6 +23,7 @@
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/perf_api_probe.h"
#include "../../util/evsel_config.h"
#include "../../util/pmu.h"
#include "../../util/cs-etm.h"
@ -232,7 +233,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
if (ret != 1) {
pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
sink, perf_evsel__name(evsel), errno,
sink, evsel__name(evsel), errno,
str_error_r(errno, msg, sizeof(msg)));
return ret;
}
@ -401,7 +402,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
* when a context switch happened.
*/
if (!perf_cpu_map__empty(cpus)) {
perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
evsel__set_sample_bit(cs_etm_evsel, CPU);
err = cs_etm_set_option(itr, cs_etm_evsel,
ETM_OPT_CTXTID | ETM_OPT_TS);
@ -425,7 +426,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
/* In per-cpu case, always need the time of mmap events etc */
if (!perf_cpu_map__empty(cpus))
perf_evsel__set_sample_bit(tracking_evsel, TIME);
evsel__set_sample_bit(tracking_evsel, TIME);
}
out:


@ -120,9 +120,9 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
*/
perf_evlist__to_front(evlist, arm_spe_evsel);
perf_evsel__set_sample_bit(arm_spe_evsel, CPU);
perf_evsel__set_sample_bit(arm_spe_evsel, TIME);
perf_evsel__set_sample_bit(arm_spe_evsel, TID);
evsel__set_sample_bit(arm_spe_evsel, CPU);
evsel__set_sample_bit(arm_spe_evsel, TIME);
evsel__set_sample_bit(arm_spe_evsel, TID);
/* Add dummy event to keep tracking */
err = parse_events(evlist, "dummy:u", NULL);
@ -134,9 +134,9 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
perf_evsel__set_sample_bit(tracking_evsel, TIME);
perf_evsel__set_sample_bit(tracking_evsel, CPU);
perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
evsel__set_sample_bit(tracking_evsel, TIME);
evsel__set_sample_bit(tracking_evsel, CPU);
evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
return 0;
}


@ -7,6 +7,8 @@
#include <string.h>
#include <linux/stringify.h>
#include "header.h"
#include "metricgroup.h"
#include <api/fs/fs.h>
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
@ -44,3 +46,9 @@ get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
return bufp;
}
int arch_get_runtimeparam(void)
{
int count;
return sysfs__read_int("/devices/hv_24x7/interface/sockets", &count) < 0 ? 1 : count;
}


@ -39,7 +39,7 @@ static void hcall_event_get_key(struct evsel *evsel,
struct event_key *key)
{
key->info = 0;
key->key = perf_evsel__intval(evsel, sample, "req");
key->key = evsel__intval(evsel, sample, "req");
}
static const char *get_hcall_exit_reason(u64 exit_code)


@ -30,7 +30,7 @@ static void event_icpt_insn_get_key(struct evsel *evsel,
{
unsigned long insn;
insn = perf_evsel__intval(evsel, sample, "instruction");
insn = evsel__intval(evsel, sample, "instruction");
key->key = icpt_insn_decoder(insn);
key->exit_reasons = sie_icpt_insn_codes;
}
@ -39,7 +39,7 @@ static void event_sigp_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "order_code");
key->key = evsel__intval(evsel, sample, "order_code");
key->exit_reasons = sie_sigp_order_codes;
}
@ -47,7 +47,7 @@ static void event_diag_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "code");
key->key = evsel__intval(evsel, sample, "code");
key->exit_reasons = sie_diagnose_codes;
}
@ -55,7 +55,7 @@ static void event_icpt_prog_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "code");
key->key = evsel__intval(evsel, sample, "code");
key->exit_reasons = sie_icpt_prog_codes;
}


@ -130,13 +130,11 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
goto next_event;
if (strcmp(event->comm.comm, comm1) == 0) {
CHECK__(perf_evsel__parse_sample(evsel, event,
&sample));
CHECK__(evsel__parse_sample(evsel, event, &sample));
comm1_time = sample.time;
}
if (strcmp(event->comm.comm, comm2) == 0) {
CHECK__(perf_evsel__parse_sample(evsel, event,
&sample));
CHECK__(evsel__parse_sample(evsel, event, &sample));
comm2_time = sample.time;
}
next_event:


@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
* AUX event.
*/
if (!perf_cpu_map__empty(cpus))
perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
evsel__set_sample_bit(intel_bts_evsel, CPU);
}
/* Add dummy event to keep tracking */


@ -25,6 +25,7 @@
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
@ -420,8 +421,8 @@ static int intel_pt_track_switches(struct evlist *evlist)
evsel = evlist__last(evlist);
perf_evsel__set_sample_bit(evsel, CPU);
perf_evsel__set_sample_bit(evsel, TIME);
evsel__set_sample_bit(evsel, CPU);
evsel__set_sample_bit(evsel, TIME);
evsel->core.system_wide = true;
evsel->no_aux_samples = true;
@ -801,10 +802,10 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
switch_evsel->no_aux_samples = true;
switch_evsel->immediate = true;
perf_evsel__set_sample_bit(switch_evsel, TID);
perf_evsel__set_sample_bit(switch_evsel, TIME);
perf_evsel__set_sample_bit(switch_evsel, CPU);
perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
evsel__set_sample_bit(switch_evsel, TID);
evsel__set_sample_bit(switch_evsel, TIME);
evsel__set_sample_bit(switch_evsel, CPU);
evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
opts->record_switch_events = false;
ptr->have_sched_switch = 3;
@ -838,7 +839,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* AUX event.
*/
if (!perf_cpu_map__empty(cpus))
perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
evsel__set_sample_bit(intel_pt_evsel, CPU);
}
/* Add dummy event to keep tracking */
@ -862,11 +863,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
/* In per-cpu case, always need the time of mmap events etc */
if (!perf_cpu_map__empty(cpus)) {
perf_evsel__set_sample_bit(tracking_evsel, TIME);
evsel__set_sample_bit(tracking_evsel, TIME);
/* And the CPU for switch events */
perf_evsel__set_sample_bit(tracking_evsel, CPU);
evsel__set_sample_bit(tracking_evsel, CPU);
}
perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
}
/*


@ -31,8 +31,8 @@ const char *kvm_exit_trace = "kvm:kvm_exit";
static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "gpa");
key->info = perf_evsel__intval(evsel, sample, "type");
key->key = evsel__intval(evsel, sample, "gpa");
key->info = evsel__intval(evsel, sample, "type");
}
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
@ -48,7 +48,7 @@ static bool mmio_event_begin(struct evsel *evsel,
/* MMIO write begin event in kernel. */
if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
mmio_event_get_key(evsel, sample, key);
return true;
}
@ -65,7 +65,7 @@ static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample,
/* MMIO read end event in kernel.*/
if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
mmio_event_get_key(evsel, sample, key);
return true;
}
@ -94,8 +94,8 @@ static void ioport_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "port");
key->info = perf_evsel__intval(evsel, sample, "rw");
key->key = evsel__intval(evsel, sample, "port");
key->info = evsel__intval(evsel, sample, "rw");
}
static bool ioport_event_begin(struct evsel *evsel,


@ -6,9 +6,10 @@ perf-y += futex-wake.o
perf-y += futex-wake-parallel.o
perf-y += futex-requeue.o
perf-y += futex-lock-pi.o
perf-y += epoll-wait.o
perf-y += epoll-ctl.o
perf-y += synthesize.o
perf-y += kallsyms-parse.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o


@ -41,9 +41,10 @@ int bench_futex_wake_parallel(int argc, const char **argv);
int bench_futex_requeue(int argc, const char **argv);
/* pi futexes */
int bench_futex_lock_pi(int argc, const char **argv);
int bench_epoll_wait(int argc, const char **argv);
int bench_epoll_ctl(int argc, const char **argv);
int bench_synthesize(int argc, const char **argv);
int bench_kallsyms_parse(int argc, const char **argv);
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0


@ -519,7 +519,8 @@ int bench_epoll_wait(int argc, const char **argv)
qsort(worker, nthreads, sizeof(struct worker), cmpworker);
for (i = 0; i < nthreads; i++) {
unsigned long t = worker[i].ops / bench__runtime.tv_sec;
unsigned long t = bench__runtime.tv_sec > 0 ?
worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);


@ -205,7 +205,8 @@ int bench_futex_hash(int argc, const char **argv)
pthread_mutex_destroy(&thread_lock);
for (i = 0; i < nthreads; i++) {
unsigned long t = worker[i].ops / bench__runtime.tv_sec;
unsigned long t = bench__runtime.tv_sec > 0 ?
worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
if (!silent) {
if (nfutexes == 1)


@ -211,7 +211,8 @@ int bench_futex_lock_pi(int argc, const char **argv)
pthread_mutex_destroy(&thread_lock);
for (i = 0; i < nthreads; i++) {
unsigned long t = worker[i].ops / bench__runtime.tv_sec;
unsigned long t = bench__runtime.tv_sec > 0 ?
worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
if (!silent)


@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Benchmark of /proc/kallsyms parsing.
*
* Copyright 2020 Google LLC.
*/
#include <stdlib.h>
#include "bench.h"
#include "../util/stat.h"
#include <linux/time64.h>
#include <subcmd/parse-options.h>
#include <symbol/kallsyms.h>
static unsigned int iterations = 100;
static const struct option options[] = {
OPT_UINTEGER('i', "iterations", &iterations,
"Number of iterations used to compute average"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals kallsyms-parse <options>",
NULL
};
static int bench_process_symbol(void *arg __maybe_unused,
const char *name __maybe_unused,
char type __maybe_unused,
u64 start __maybe_unused)
{
return 0;
}
static int do_kallsyms_parse(void)
{
struct timeval start, end, diff;
u64 runtime_us;
unsigned int i;
double time_average, time_stddev;
int err;
struct stats time_stats;
init_stats(&time_stats);
for (i = 0; i < iterations; i++) {
gettimeofday(&start, NULL);
err = kallsyms__parse("/proc/kallsyms", NULL,
bench_process_symbol);
if (err)
return err;
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
}
time_average = avg_stats(&time_stats) / USEC_PER_MSEC;
time_stddev = stddev_stats(&time_stats) / USEC_PER_MSEC;
printf(" Average kallsyms__parse took: %.3f ms (+- %.3f ms)\n",
time_average, time_stddev);
return 0;
}
int bench_kallsyms_parse(int argc, const char **argv)
{
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
return do_kallsyms_parse();
}


@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Benchmark synthesis of perf events such as at the start of a 'perf
* record'. Synthesis is done on the current process and the 'dummy' event
* handlers are invoked that support dump_trace but otherwise do nothing.
*
* Copyright 2019 Google LLC.
*/
#include <stdio.h>
#include "bench.h"
#include "../util/debug.h"
#include "../util/session.h"
#include "../util/stat.h"
#include "../util/synthetic-events.h"
#include "../util/target.h"
#include "../util/thread_map.h"
#include "../util/tool.h"
#include "../util/util.h"
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <subcmd/parse-options.h>
static unsigned int min_threads = 1;
static unsigned int max_threads = UINT_MAX;
static unsigned int single_iterations = 10000;
static unsigned int multi_iterations = 10;
static bool run_st;
static bool run_mt;
static const struct option options[] = {
OPT_BOOLEAN('s', "st", &run_st, "Run single threaded benchmark"),
OPT_BOOLEAN('t', "mt", &run_mt, "Run multi-threaded benchmark"),
OPT_UINTEGER('m', "min-threads", &min_threads,
"Minimum number of threads in multithreaded bench"),
OPT_UINTEGER('M', "max-threads", &max_threads,
"Maximum number of threads in multithreaded bench"),
OPT_UINTEGER('i', "single-iterations", &single_iterations,
"Number of iterations used to compute single-threaded average"),
OPT_UINTEGER('I', "multi-iterations", &multi_iterations,
"Number of iterations used to compute multi-threaded average"),
OPT_END()
};
static const char *const bench_usage[] = {
"perf bench internals synthesize <options>",
NULL
};
static atomic_t event_count;
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
atomic_inc(&event_count);
return 0;
}
static int do_run_single_threaded(struct perf_session *session,
struct perf_thread_map *threads,
struct target *target, bool data_mmap)
{
const unsigned int nr_threads_synthesize = 1;
struct timeval start, end, diff;
u64 runtime_us;
unsigned int i;
double time_average, time_stddev, event_average, event_stddev;
int err;
struct stats time_stats, event_stats;
init_stats(&time_stats);
init_stats(&event_stats);
for (i = 0; i < single_iterations; i++) {
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
err = __machine__synthesize_threads(&session->machines.host,
NULL,
target, threads,
process_synthesized_event,
data_mmap,
nr_threads_synthesize);
if (err)
return err;
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
update_stats(&event_stats, atomic_read(&event_count));
}
time_average = avg_stats(&time_stats);
time_stddev = stddev_stats(&time_stats);
printf(" Average %ssynthesis took: %.3f usec (+- %.3f usec)\n",
data_mmap ? "data " : "", time_average, time_stddev);
event_average = avg_stats(&event_stats);
event_stddev = stddev_stats(&event_stats);
printf(" Average num. events: %.3f (+- %.3f)\n",
event_average, event_stddev);
printf(" Average time per event %.3f usec\n",
time_average / event_average);
return 0;
}
static int run_single_threaded(void)
{
struct perf_session *session;
struct target target = {
.pid = "self",
};
struct perf_thread_map *threads;
int err;
perf_set_singlethreaded();
session = perf_session__new(NULL, false, NULL);
if (IS_ERR(session)) {
pr_err("Session creation failed.\n");
return PTR_ERR(session);
}
threads = thread_map__new_by_pid(getpid());
if (!threads) {
pr_err("Thread map creation failed.\n");
err = -ENOMEM;
goto err_out;
}
puts(
"Computing performance of single threaded perf event synthesis by\n"
"synthesizing events on the perf process itself:");
err = do_run_single_threaded(session, threads, &target, false);
if (err)
goto err_out;
err = do_run_single_threaded(session, threads, &target, true);
err_out:
if (threads)
perf_thread_map__put(threads);
perf_session__delete(session);
return err;
}
static int do_run_multi_threaded(struct target *target,
unsigned int nr_threads_synthesize)
{
struct timeval start, end, diff;
u64 runtime_us;
unsigned int i;
double time_average, time_stddev, event_average, event_stddev;
int err;
struct stats time_stats, event_stats;
struct perf_session *session;
init_stats(&time_stats);
init_stats(&event_stats);
for (i = 0; i < multi_iterations; i++) {
session = perf_session__new(NULL, false, NULL);
if (IS_ERR(session))
return PTR_ERR(session);
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
err = __machine__synthesize_threads(&session->machines.host,
NULL,
target, NULL,
process_synthesized_event,
false,
nr_threads_synthesize);
if (err) {
perf_session__delete(session);
return err;
}
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
update_stats(&time_stats, runtime_us);
update_stats(&event_stats, atomic_read(&event_count));
perf_session__delete(session);
}
time_average = avg_stats(&time_stats);
time_stddev = stddev_stats(&time_stats);
printf(" Average synthesis took: %.3f usec (+- %.3f usec)\n",
time_average, time_stddev);
event_average = avg_stats(&event_stats);
event_stddev = stddev_stats(&event_stats);
printf(" Average num. events: %.3f (+- %.3f)\n",
event_average, event_stddev);
printf(" Average time per event %.3f usec\n",
time_average / event_average);
return 0;
}
static int run_multi_threaded(void)
{
struct target target = {
.cpu_list = "0"
};
unsigned int nr_threads_synthesize;
int err;
if (max_threads == UINT_MAX)
max_threads = sysconf(_SC_NPROCESSORS_ONLN);
puts(
"Computing performance of multi threaded perf event synthesis by\n"
"synthesizing events on CPU 0:");
for (nr_threads_synthesize = min_threads;
nr_threads_synthesize <= max_threads;
nr_threads_synthesize++) {
if (nr_threads_synthesize == 1)
perf_set_singlethreaded();
else
perf_set_multithreaded();
printf(" Number of synthesis threads: %u\n",
nr_threads_synthesize);
err = do_run_multi_threaded(&target, nr_threads_synthesize);
if (err)
return err;
}
perf_set_singlethreaded();
return 0;
}
int bench_synthesize(int argc, const char **argv)
{
int err = 0;
argc = parse_options(argc, argv, options, bench_usage, 0);
if (argc) {
usage_with_options(bench_usage, options);
exit(EXIT_FAILURE);
}
/*
* If neither single threaded nor multi-threaded is specified, default
* to running just single threaded.
*/
if (!run_st && !run_mt)
run_st = true;
if (run_st)
err = run_single_threaded();
if (!err && run_mt)
err = run_multi_threaded();
return err;
}


@ -212,11 +212,9 @@ static bool has_annotation(struct perf_annotate *ann)
return ui__has_annotation() || ann->use_stdio2;
}
static int perf_evsel__add_sample(struct evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct perf_annotate *ann,
struct machine *machine)
static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample,
struct addr_location *al, struct perf_annotate *ann,
struct machine *machine)
{
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
@ -278,7 +276,7 @@ static int process_sample_event(struct perf_tool *tool,
goto out_put;
if (!al.filtered &&
perf_evsel__add_sample(evsel, sample, &al, ann, machine)) {
evsel__add_sample(evsel, sample, &al, ann, machine)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
ret = -1;
@ -433,11 +431,10 @@ static int __cmd_annotate(struct perf_annotate *ann)
total_nr_samples += nr_samples;
hists__collapse_resort(hists, NULL);
/* Don't sort callchain */
perf_evsel__reset_sample_bit(pos, CALLCHAIN);
evsel__reset_sample_bit(pos, CALLCHAIN);
perf_evsel__output_resort(pos, NULL);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
hists__find_annotations(hists, pos, ann);


@ -76,6 +76,12 @@ static struct bench epoll_benchmarks[] = {
};
#endif // HAVE_EVENTFD
static struct bench internals_benchmarks[] = {
{ "synthesize", "Benchmark perf event synthesis", bench_synthesize },
{ "kallsyms-parse", "Benchmark kallsyms parsing", bench_kallsyms_parse },
{ NULL, NULL, NULL }
};
struct collection {
const char *name;
const char *summary;
@ -92,6 +98,7 @@ static struct collection collections[] = {
#ifdef HAVE_EVENTFD
{"epoll", "Epoll stressing benchmarks", epoll_benchmarks },
#endif
{ "internals", "Perf-internals benchmarks", internals_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};


@ -95,6 +95,7 @@ struct perf_c2c {
bool use_stdio;
bool stats_only;
bool symbol_full;
bool stitch_lbr;
/* HITM shared clines stats */
struct c2c_stats hitm_stats;
@ -273,6 +274,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
if (c2c.stitch_lbr)
al.thread->lbr_stitch_enable = true;
ret = sample__resolve_callchain(sample, &callchain_cursor, NULL,
evsel, &al, sysctl_perf_event_max_stack);
if (ret)
@ -1705,7 +1709,7 @@ static struct c2c_dimension *get_dimension(const char *name)
if (!strcmp(dim->name, name))
return dim;
};
}
return NULL;
}
@ -1921,7 +1925,7 @@ static bool he__display(struct hist_entry *he, struct c2c_stats *stats)
FILTER_HITM(tot_hitm);
default:
break;
};
}
#undef FILTER_HITM
@ -2255,8 +2259,7 @@ static void print_c2c_info(FILE *out, struct perf_session *session)
fprintf(out, "=================================================\n");
evlist__for_each_entry(evlist, evsel) {
fprintf(out, "%-36s: %s\n", first ? " Events" : "",
perf_evsel__name(evsel));
fprintf(out, "%-36s: %s\n", first ? " Events" : "", evsel__name(evsel));
first = false;
}
fprintf(out, " Cachelines sort on : %s HITMs\n",
@ -2601,6 +2604,12 @@ static int setup_callchain(struct evlist *evlist)
}
}
if (c2c.stitch_lbr && (mode != CALLCHAIN_LBR)) {
ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
"Please apply --call-graph lbr when recording.\n");
c2c.stitch_lbr = false;
}
callchain_param.record_mode = mode;
callchain_param.min_percent = 0;
return 0;
@ -2752,6 +2761,8 @@ static int perf_c2c__report(int argc, const char **argv)
OPT_STRING('c', "coalesce", &coalesce, "coalesce fields",
"coalesce fields: pid,tid,iaddr,dso"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_BOOLEAN(0, "stitch-lbr", &c2c.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPT_PARENT(c2c_options),
OPT_END()
};
@ -2947,7 +2958,7 @@ static int perf_c2c__record(int argc, const char **argv)
rec_argv[i++] = "-e";
rec_argv[i++] = perf_mem_events__name(j);
};
}
if (all_user)
rec_argv[i++] = "--all-user";


@ -467,7 +467,7 @@ static struct evsel *evsel_match(struct evsel *evsel,
struct evsel *e;
evlist__for_each_entry(evlist, e) {
if (perf_evsel__match2(evsel, e))
if (evsel__match2(evsel, e))
return e;
}
@ -981,7 +981,7 @@ static void data_process(void)
if (!quiet) {
fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n",
perf_evsel__name(evsel_base));
evsel__name(evsel_base));
}
first = false;
@ -990,7 +990,7 @@ static void data_process(void)
data__fprintf();
/* Don't sort callchain for perf diff */
perf_evsel__reset_sample_bit(evsel_base, CALLCHAIN);
evsel__reset_sample_bit(evsel_base, CALLCHAIN);
hists__process(hists_base);
}
@ -1562,7 +1562,7 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
default:
BUG_ON(1);
};
}
}
static void


@ -284,10 +284,11 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
.events = POLLIN,
};
if (!perf_cap__capable(CAP_SYS_ADMIN)) {
if (!(perf_cap__capable(CAP_PERFMON) ||
perf_cap__capable(CAP_SYS_ADMIN))) {
pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
"users with the SYS_ADMIN capability"
"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
"root"
#endif


@ -536,7 +536,7 @@ static int perf_inject__sched_stat(struct perf_tool *tool,
union perf_event *event_sw;
struct perf_sample sample_sw;
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u32 pid = perf_evsel__intval(evsel, sample, "pid");
u32 pid = evsel__intval(evsel, sample, "pid");
list_for_each_entry(ent, &inject->samples, node) {
if (pid == ent->tid)
@ -546,7 +546,7 @@ static int perf_inject__sched_stat(struct perf_tool *tool,
return 0;
found:
event_sw = &ent->event[0];
perf_evsel__parse_sample(evsel, event_sw, &sample_sw);
evsel__parse_sample(evsel, event_sw, &sample_sw);
sample_sw.period = sample->period;
sample_sw.time = sample->time;
@ -561,11 +561,10 @@ static void sig_handler(int sig __maybe_unused)
session_done = 1;
}
static int perf_evsel__check_stype(struct evsel *evsel,
u64 sample_type, const char *sample_msg)
static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
struct perf_event_attr *attr = &evsel->core.attr;
const char *name = perf_evsel__name(evsel);
const char *name = evsel__name(evsel);
if (!(attr->sample_type & sample_type)) {
pr_err("Samples for %s event do not have %s attribute set.",
@ -622,10 +621,10 @@ static int __cmd_inject(struct perf_inject *inject)
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
const char *name = perf_evsel__name(evsel);
const char *name = evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch")) {
if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
return -EINVAL;
evsel->handler = perf_inject__sched_switch;
@ -684,14 +683,14 @@ static int __cmd_inject(struct perf_inject *inject)
perf_header__clear_feat(&session->header,
HEADER_AUXTRACE);
if (inject->itrace_synth_opts.last_branch)
if (inject->itrace_synth_opts.last_branch ||
inject->itrace_synth_opts.add_last_branch)
perf_header__set_feat(&session->header,
HEADER_BRANCH_STACK);
evsel = perf_evlist__id2evsel_strict(session->evlist,
inject->aux_id);
if (evsel) {
pr_debug("Deleting %s\n",
perf_evsel__name(evsel));
pr_debug("Deleting %s\n", evsel__name(evsel));
evlist__remove(session->evlist, evsel);
evsel__delete(evsel);
}


@ -169,13 +169,12 @@ static int insert_caller_stat(unsigned long call_site,
return 0;
}
static int perf_evsel__process_alloc_event(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
call_site = perf_evsel__intval(evsel, sample, "call_site");
int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
call_site = evsel__intval(evsel, sample, "call_site");
int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");
if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
insert_caller_stat(call_site, bytes_req, bytes_alloc))
@ -188,14 +187,13 @@ static int perf_evsel__process_alloc_event(struct evsel *evsel,
return 0;
}
static int perf_evsel__process_alloc_node_event(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_sample *sample)
{
int ret = perf_evsel__process_alloc_event(evsel, sample);
int ret = evsel__process_alloc_event(evsel, sample);
if (!ret) {
int node1 = cpu__get_node(sample->cpu),
node2 = perf_evsel__intval(evsel, sample, "node");
node2 = evsel__intval(evsel, sample, "node");
if (node1 != node2)
nr_cross_allocs++;
@ -232,10 +230,9 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
return NULL;
}
static int perf_evsel__process_free_event(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
{
unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
unsigned long ptr = evsel__intval(evsel, sample, "ptr");
struct alloc_stat *s_alloc, *s_caller;
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
@ -784,13 +781,12 @@ static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
return 0;
}
static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
u64 page;
unsigned int order = perf_evsel__intval(evsel, sample, "order");
unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
unsigned int migrate_type = perf_evsel__intval(evsel, sample,
unsigned int order = evsel__intval(evsel, sample, "order");
unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
unsigned int migrate_type = evsel__intval(evsel, sample,
"migratetype");
u64 bytes = kmem_page_size << order;
u64 callsite;
@ -802,9 +798,9 @@ static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
};
if (use_pfn)
page = perf_evsel__intval(evsel, sample, "pfn");
page = evsel__intval(evsel, sample, "pfn");
else
page = perf_evsel__intval(evsel, sample, "page");
page = evsel__intval(evsel, sample, "page");
nr_page_allocs++;
total_page_alloc_bytes += bytes;
@ -857,11 +853,10 @@ static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
return 0;
}
static int perf_evsel__process_page_free_event(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
{
u64 page;
unsigned int order = perf_evsel__intval(evsel, sample, "order");
unsigned int order = evsel__intval(evsel, sample, "order");
u64 bytes = kmem_page_size << order;
struct page_stat *pstat;
struct page_stat this = {
@ -869,9 +864,9 @@ static int perf_evsel__process_page_free_event(struct evsel *evsel,
};
if (use_pfn)
page = perf_evsel__intval(evsel, sample, "pfn");
page = evsel__intval(evsel, sample, "pfn");
else
page = perf_evsel__intval(evsel, sample, "page");
page = evsel__intval(evsel, sample, "page");
nr_page_frees++;
total_page_free_bytes += bytes;
@ -1371,15 +1366,15 @@ static int __cmd_kmem(struct perf_session *session)
struct evsel *evsel;
const struct evsel_str_handler kmem_tracepoints[] = {
/* slab allocator */
{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
{ "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
{ "kmem:kfree", perf_evsel__process_free_event, },
{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
{ "kmem:kmalloc", evsel__process_alloc_event, },
{ "kmem:kmem_cache_alloc", evsel__process_alloc_event, },
{ "kmem:kmalloc_node", evsel__process_alloc_node_event, },
{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_node_event, },
{ "kmem:kfree", evsel__process_free_event, },
{ "kmem:kmem_cache_free", evsel__process_free_event, },
/* page allocator */
{ "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
{ "kmem:mm_page_free", perf_evsel__process_page_free_event, },
{ "kmem:mm_page_alloc", evsel__process_page_alloc_event, },
{ "kmem:mm_page_free", evsel__process_page_free_event, },
};
if (!perf_session__has_traces(session, "kmem record"))
@ -1391,8 +1386,8 @@ static int __cmd_kmem(struct perf_session *session)
}
evlist__for_each_entry(session->evlist, evsel) {
if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
perf_evsel__field(evsel, "pfn")) {
if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
evsel__field(evsel, "pfn")) {
use_pfn = true;
break;
}


@ -69,7 +69,7 @@ void exit_event_get_key(struct evsel *evsel,
struct event_key *key)
{
key->info = 0;
key->key = perf_evsel__intval(evsel, sample, kvm_exit_reason);
key->key = evsel__intval(evsel, sample, kvm_exit_reason);
}
bool kvm_exit_event(struct evsel *evsel)
@ -416,8 +416,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
return NULL;
}
vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample,
vcpu_id_str);
vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
thread__set_priv(thread, vcpu_record);
}
@ -1033,16 +1032,16 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
struct perf_event_attr *attr = &pos->core.attr;
/* make sure these *are* set */
perf_evsel__set_sample_bit(pos, TID);
perf_evsel__set_sample_bit(pos, TIME);
perf_evsel__set_sample_bit(pos, CPU);
perf_evsel__set_sample_bit(pos, RAW);
evsel__set_sample_bit(pos, TID);
evsel__set_sample_bit(pos, TIME);
evsel__set_sample_bit(pos, CPU);
evsel__set_sample_bit(pos, RAW);
/* make sure these are *not*; want as small a sample as possible */
perf_evsel__reset_sample_bit(pos, PERIOD);
perf_evsel__reset_sample_bit(pos, IP);
perf_evsel__reset_sample_bit(pos, CALLCHAIN);
perf_evsel__reset_sample_bit(pos, ADDR);
perf_evsel__reset_sample_bit(pos, READ);
evsel__reset_sample_bit(pos, PERIOD);
evsel__reset_sample_bit(pos, IP);
evsel__reset_sample_bit(pos, CALLCHAIN);
evsel__reset_sample_bit(pos, ADDR);
evsel__reset_sample_bit(pos, READ);
attr->mmap = 0;
attr->comm = 0;
attr->task = 0;


@ -48,7 +48,7 @@ struct lock_stat {
struct rb_node rb; /* used for sorting */
/*
* FIXME: perf_evsel__intval() returns u64,
* FIXME: evsel__intval() returns u64,
* so address of lockdep_map should be dealed as 64bit.
* Is there more better solution?
*/
@ -404,9 +404,9 @@ static int report_lock_acquire_event(struct evsel *evsel,
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
int flag = perf_evsel__intval(evsel, sample, "flag");
const char *name = evsel__strval(evsel, sample, "name");
u64 tmp = evsel__intval(evsel, sample, "lockdep_addr");
int flag = evsel__intval(evsel, sample, "flag");
memcpy(&addr, &tmp, sizeof(void *));
@ -477,8 +477,8 @@ static int report_lock_acquired_event(struct evsel *evsel,
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
const char *name = evsel__strval(evsel, sample, "name");
u64 tmp = evsel__intval(evsel, sample, "lockdep_addr");
memcpy(&addr, &tmp, sizeof(void *));
@ -539,8 +539,8 @@ static int report_lock_contended_event(struct evsel *evsel,
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
const char *name = evsel__strval(evsel, sample, "name");
u64 tmp = evsel__intval(evsel, sample, "lockdep_addr");
memcpy(&addr, &tmp, sizeof(void *));
@ -594,8 +594,8 @@ static int report_lock_release_event(struct evsel *evsel,
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
const char *name = evsel__strval(evsel, sample, "name");
u64 tmp = evsel__intval(evsel, sample, "lockdep_addr");
memcpy(&addr, &tmp, sizeof(void *));
@ -657,32 +657,28 @@ static struct trace_lock_handler report_lock_ops = {
static struct trace_lock_handler *trace_handler;
static int perf_evsel__process_lock_acquire(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->acquire_event)
return trace_handler->acquire_event(evsel, sample);
return 0;
}
static int perf_evsel__process_lock_acquired(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->acquired_event)
return trace_handler->acquired_event(evsel, sample);
return 0;
}
static int perf_evsel__process_lock_contended(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->contended_event)
return trace_handler->contended_event(evsel, sample);
return 0;
}
static int perf_evsel__process_lock_release(struct evsel *evsel,
struct perf_sample *sample)
static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->release_event)
return trace_handler->release_event(evsel, sample);
@ -775,7 +771,7 @@ static void dump_threads(void)
pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
};
}
}
static void dump_map(void)
@ -849,10 +845,10 @@ static void sort_result(void)
}
static const struct evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
};
static bool force;


@ -123,7 +123,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
rec_argv[i++] = "-e";
rec_argv[i++] = perf_mem_events__name(j);
};
}
if (all_user)
rec_argv[i++] = "--all-user";


@ -34,6 +34,7 @@
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
@ -43,6 +44,7 @@
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "asm/bug.h"
#include "perf.h"
@ -50,6 +52,7 @@
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
@ -84,7 +87,10 @@ struct record {
struct auxtrace_record *itr;
struct evlist *evlist;
struct perf_session *session;
struct evlist *sb_evlist;
pthread_t thread_id;
int realtime_prio;
bool switch_output_event_set;
bool no_buildid;
bool no_buildid_set;
bool no_buildid_cache;
@ -503,6 +509,20 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, NULL, event, event->header.size);
}
static int process_locked_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
int ret;
pthread_mutex_lock(&synth_lock);
ret = process_synthesized_event(tool, event, sample, machine);
pthread_mutex_unlock(&synth_lock);
return ret;
}
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
@ -825,7 +845,7 @@ static int record__open(struct record *rec)
evlist__for_each_entry(evlist, pos) {
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
@ -837,8 +857,7 @@ try_again:
goto try_again;
}
rc = -errno;
perf_evsel__open_strerror(pos, &opts->target,
errno, msg, sizeof(msg));
evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out;
}
@ -859,7 +878,7 @@ try_again:
if (perf_evlist__apply_filters(evlist, &pos)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter, perf_evsel__name(pos), errno,
pos->filter, evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
@ -1288,6 +1307,7 @@ static int record__synthesize(struct record *rec, bool tail)
struct perf_tool *tool = &rec->tool;
int fd = perf_data__fd(data);
int err = 0;
event_op f = process_synthesized_event;
if (rec->opts.tail_synthesize != tail)
return 0;
@ -1402,13 +1422,67 @@ static int record__synthesize(struct record *rec, bool tail)
if (err < 0)
pr_warning("Couldn't synthesize cgroup events.\n");
if (rec->opts.nr_threads_synthesize > 1) {
perf_set_multithreaded();
f = process_locked_synthesized_event;
}
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
process_synthesized_event, opts->sample_address,
1);
f, opts->sample_address,
rec->opts.nr_threads_synthesize);
if (rec->opts.nr_threads_synthesize > 1)
perf_set_singlethreaded();
out:
return err;
}
static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
{
struct record *rec = data;
pthread_kill(rec->thread_id, SIGUSR2);
return 0;
}
static int record__setup_sb_evlist(struct record *rec)
{
struct record_opts *opts = &rec->opts;
if (rec->sb_evlist != NULL) {
/*
* We get here if --switch-output-event populated the
* sb_evlist, so associate a callback that will send a SIGUSR2
* to the main thread.
*/
evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
rec->thread_id = pthread_self();
}
if (!opts->no_bpf_event) {
if (rec->sb_evlist == NULL) {
rec->sb_evlist = evlist__new();
if (rec->sb_evlist == NULL) {
pr_err("Couldn't create side band evlist.\n.");
return -1;
}
}
if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
return -1;
}
}
if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
return 0;
}
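
This is the plumbing behind --switch-output-event: the side band thread's callback, record__process_signal_event() above, pokes the main record thread with SIGUSR2, which the existing --switch-output=signal path already handles. A minimal standalone sketch of that handoff (invented names; perf's real handler is asynchronous, while this sketch uses sigwait() only to keep the illustration short and race-free; build with cc -pthread):

        #include <pthread.h>
        #include <signal.h>
        #include <stdio.h>
        #include <unistd.h>

        static pthread_t main_thread;

        /* Stand-in for record__process_signal_event(): poke the main thread. */
        static void *side_band(void *arg)
        {
                (void)arg;
                sleep(1);                       /* pretend a --switch-output-event fired */
                pthread_kill(main_thread, SIGUSR2);
                return NULL;
        }

        int main(void)
        {
                sigset_t set;
                pthread_t sb;
                int sig;

                main_thread = pthread_self();

                /* Block SIGUSR2 so it can be picked up synchronously with sigwait(). */
                sigemptyset(&set);
                sigaddset(&set, SIGUSR2);
                pthread_sigmask(SIG_BLOCK, &set, NULL);

                pthread_create(&sb, NULL, side_band, NULL);

                sigwait(&set, &sig);            /* wait for the side band thread's signal */
                printf("got SIGUSR2: would take a perf.data snapshot now\n");

                pthread_join(sb, NULL);
                return 0;
        }
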
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
@ -1420,7 +1494,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
struct evlist *sb_evlist = NULL;
int fd;
float ratio = 0;
@ -1546,21 +1619,17 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
err = -1;
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
err = -1;
goto out_child;
}
if (!opts->no_bpf_event)
bpf_event__add_sb_event(&sb_evlist, &session->header.env);
if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
err = record__setup_sb_evlist(rec);
if (err)
goto out_child;
err = record__synthesize(rec, false);
if (err < 0)
@ -1831,7 +1900,7 @@ out_delete_session:
perf_session__delete(session);
if (!opts->no_bpf_event)
perf_evlist__stop_sb_thread(sb_evlist);
perf_evlist__stop_sb_thread(rec->sb_evlist);
return status;
}
@ -2142,10 +2211,19 @@ static int switch_output_setup(struct record *rec)
};
unsigned long val;
/*
* If we're using --switch-output-events, then we imply its
* --switch-output=signal, as we'll send a SIGUSR2 from the side band
* thread to its parent.
*/
if (rec->switch_output_event_set)
goto do_signal;
if (!s->set)
return 0;
if (!strcmp(s->str, "signal")) {
do_signal:
s->signal = true;
pr_debug("switch-output with SIGUSR2 signal\n");
goto enabled;
@ -2232,6 +2310,7 @@ static struct record record = {
.default_per_cpu = true,
},
.mmap_flush = MMAP_FLUSH_DEFAULT,
.nr_threads_synthesize = 1,
},
.tool = {
.sample = process_sample_event,
@ -2402,6 +2481,9 @@ static struct option __record_options[] = {
&record.switch_output.set, "signal or size[BKMG] or time[smhd]",
"Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
"switch output event selector. use 'perf list' to list available events",
parse_events_option_new_evlist),
OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
"Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
@ -2421,6 +2503,9 @@ static struct option __record_options[] = {
#endif
OPT_CALLBACK(0, "max-size", &record.output_max_size,
"size", "Limit the maximum size of the output file", parse_output_max_size),
OPT_UINTEGER(0, "num-thread-synthesize",
&record.opts.nr_threads_synthesize,
"number of threads to run for event synthesis"),
OPT_END()
};

tools/perf/builtin-report.c

@ -84,6 +84,7 @@ struct report {
bool header_only;
bool nonany_branch_mode;
bool group_set;
bool stitch_lbr;
int max_stack;
struct perf_read_values show_threads_values;
struct annotation_options annotation_opts;
@ -267,6 +268,9 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
if (rep->stitch_lbr)
al.thread->lbr_stitch_enable = true;
if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out_put;
@ -317,7 +321,7 @@ static int process_read_event(struct perf_tool *tool,
struct report *rep = container_of(tool, struct report, tool);
if (rep->show_threads) {
const char *name = perf_evsel__name(evsel);
const char *name = evsel__name(evsel);
int err = perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
evsel->idx,
@ -339,12 +343,14 @@ static int report__setup_sample_type(struct report *rep)
bool is_pipe = perf_data__is_pipe(session->data);
if (session->itrace_synth_opts->callchain ||
session->itrace_synth_opts->add_callchain ||
(!is_pipe &&
perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
!session->itrace_synth_opts->set))
sample_type |= PERF_SAMPLE_CALLCHAIN;
if (session->itrace_synth_opts->last_branch)
if (session->itrace_synth_opts->last_branch ||
session->itrace_synth_opts->add_last_branch)
sample_type |= PERF_SAMPLE_BRANCH_STACK;
if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
@ -407,6 +413,12 @@ static int report__setup_sample_type(struct report *rep)
callchain_param.record_mode = CALLCHAIN_FP;
}
if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
"Please apply --call-graph lbr when recording.\n");
rep->stitch_lbr = false;
}
/* ??? handle more cases than just ANY? */
if (!(perf_evlist__combined_branch_type(session->evlist) &
PERF_SAMPLE_BRANCH_ANY))
@ -447,10 +459,10 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
nr_events = hists->stats.total_non_filtered_period;
}
if (perf_evsel__is_group_event(evsel)) {
if (evsel__is_group_event(evsel)) {
struct evsel *pos;
perf_evsel__group_desc(evsel, buf, size);
evsel__group_desc(evsel, buf, size);
evname = buf;
for_each_group_member(pos, evsel) {
@ -525,10 +537,9 @@ static int perf_evlist__tty_browse_hists(struct evlist *evlist,
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
const char *evname = perf_evsel__name(pos);
const char *evname = evsel__name(pos);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
@ -670,8 +681,7 @@ static int report__collapse_hists(struct report *rep)
break;
/* Non-group events are considered as leader */
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos)) {
if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
struct hists *leader_hists = evsel__hists(pos->leader);
hists__match(leader_hists, hists);
@ -1257,6 +1267,8 @@ int cmd_report(int argc, const char **argv)
"Show full source file name path for source lines"),
OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
"Show callgraph from reference event"),
OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
"only show processor socket that match with this filter"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
@ -1332,7 +1344,7 @@ int cmd_report(int argc, const char **argv)
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
callchain_param.order = ORDER_CALLER;
if (itrace_synth_opts.callchain &&
if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
(int)itrace_synth_opts.callchain_sz > report.max_stack)
report.max_stack = itrace_synth_opts.callchain_sz;
@ -1380,7 +1392,7 @@ repeat:
goto error;
}
if (itrace_synth_opts.last_branch)
if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
has_br_stack = true;
if (has_br_stack && branch_call_mode)
@ -1400,7 +1412,7 @@ repeat:
}
if (branch_call_mode) {
callchain_param.key = CCKEY_ADDRESS;
callchain_param.branch_callstack = 1;
callchain_param.branch_callstack = true;
symbol_conf.use_callchain = true;
callchain_register_param(&callchain_param);
if (sort_order == NULL)

tools/perf/builtin-sched.c

@ -811,8 +811,8 @@ replay_wakeup_event(struct perf_sched *sched,
struct evsel *evsel, struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
const char *comm = perf_evsel__strval(evsel, sample, "comm");
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const char *comm = evsel__strval(evsel, sample, "comm");
const u32 pid = evsel__intval(evsel, sample, "pid");
struct task_desc *waker, *wakee;
if (verbose > 0) {
@ -833,11 +833,11 @@ static int replay_switch_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
*next_comm = perf_evsel__strval(evsel, sample, "next_comm");
const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
next_pid = perf_evsel__intval(evsel, sample, "next_pid");
const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
*next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
struct task_desc *prev, __maybe_unused *next;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
@ -1106,9 +1106,9 @@ static int latency_switch_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
next_pid = perf_evsel__intval(evsel, sample, "next_pid");
const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
@ -1176,8 +1176,8 @@ static int latency_runtime_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
const u32 pid = evsel__intval(evsel, sample, "pid");
const u64 runtime = evsel__intval(evsel, sample, "runtime");
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
@ -1211,7 +1211,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const u32 pid = evsel__intval(evsel, sample, "pid");
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *wakee;
@ -1272,7 +1272,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const u32 pid = evsel__intval(evsel, sample, "pid");
u64 timestamp = sample->time;
struct work_atoms *atoms;
struct work_atom *atom;
@ -1526,7 +1526,7 @@ map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine)
{
const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
struct thread *sched_in;
struct thread_runtime *tr;
int new_shortname;
@ -1670,8 +1670,8 @@ static int process_sched_switch_event(struct perf_tool *tool,
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
int this_cpu = sample->cpu, err = 0;
u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
next_pid = perf_evsel__intval(evsel, sample, "next_pid");
u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
if (sched->curr_pid[this_cpu] != (u32)-1) {
/*
@ -1848,7 +1848,7 @@ static inline void print_sched_time(unsigned long long nsecs, int width)
* returns runtime data for event, allocating memory for it the
* first time it is used.
*/
static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
struct evsel_runtime *r = evsel->priv;
@ -1863,10 +1863,9 @@ static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
/*
* save last time event was seen per cpu
*/
static void perf_evsel__save_time(struct evsel *evsel,
u64 timestamp, u32 cpu)
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
struct evsel_runtime *r = evsel__get_runtime(evsel);
if (r == NULL)
return;
@ -1890,9 +1889,9 @@ static void perf_evsel__save_time(struct evsel *evsel,
}
/* returns last time this event was seen on the given cpu */
static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
struct evsel_runtime *r = evsel__get_runtime(evsel);
if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
return 0;
@ -2004,8 +2003,8 @@ static void timehist_print_sample(struct perf_sched *sched,
u64 t, int state)
{
struct thread_runtime *tr = thread__priv(thread);
const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
const char *next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
u32 max_cpus = sched->max_cpu + 1;
char tstr[64];
char nstr[30];
@ -2136,8 +2135,8 @@ static bool is_idle_sample(struct perf_sample *sample,
struct evsel *evsel)
{
/* pid 0 == swapper == idle task */
if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
return perf_evsel__intval(evsel, sample, "prev_pid") == 0;
if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
return evsel__intval(evsel, sample, "prev_pid") == 0;
return sample->pid == 0;
}
@ -2334,7 +2333,7 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
itr->last_thread = thread;
/* copy task callchain when entering to idle */
if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
if (evsel__intval(evsel, sample, "next_pid") == 0)
save_idle_callchain(sched, itr, sample);
}
}
@ -2355,10 +2354,10 @@ static bool timehist_skip_sample(struct perf_sched *sched,
}
if (sched->idle_hist) {
if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
if (strcmp(evsel__name(evsel), "sched:sched_switch"))
rc = true;
else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
perf_evsel__intval(evsel, sample, "next_pid") != 0)
else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
evsel__intval(evsel, sample, "next_pid") != 0)
rc = true;
}
@ -2409,7 +2408,7 @@ static int timehist_sched_wakeup_event(struct perf_tool *tool,
struct thread *thread;
struct thread_runtime *tr = NULL;
/* want pid of awakened task not pid in sample */
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const u32 pid = evsel__intval(evsel, sample, "pid");
thread = machine__findnew_thread(machine, 0, pid);
if (thread == NULL)
@ -2445,8 +2444,8 @@ static void timehist_print_migration_event(struct perf_sched *sched,
return;
max_cpus = sched->max_cpu + 1;
ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");
ocpu = evsel__intval(evsel, sample, "orig_cpu");
dcpu = evsel__intval(evsel, sample, "dest_cpu");
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
@ -2493,7 +2492,7 @@ static int timehist_migrate_task_event(struct perf_tool *tool,
struct thread *thread;
struct thread_runtime *tr = NULL;
/* want pid of migrated task not pid in sample */
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const u32 pid = evsel__intval(evsel, sample, "pid");
thread = machine__findnew_thread(machine, 0, pid);
if (thread == NULL)
@ -2524,8 +2523,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
struct thread_runtime *tr = NULL;
u64 tprev, t = sample->time;
int rc = 0;
int state = perf_evsel__intval(evsel, sample, "prev_state");
int state = evsel__intval(evsel, sample, "prev_state");
if (machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event. skipping it\n",
@ -2549,7 +2547,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
goto out;
}
tprev = perf_evsel__get_time(evsel, sample->cpu);
tprev = evsel__get_time(evsel, sample->cpu);
/*
* If start time given:
@ -2632,7 +2630,7 @@ out:
tr->ready_to_run = 0;
}
perf_evsel__save_time(evsel, sample->time, sample->cpu);
evsel__save_time(evsel, sample->time, sample->cpu);
return rc;
}
@ -2942,7 +2940,7 @@ static int timehist_check_attr(struct perf_sched *sched,
struct evsel_runtime *er;
list_for_each_entry(evsel, &evlist->core.entries, core.node) {
er = perf_evsel__get_runtime(evsel);
er = evsel__get_runtime(evsel);
if (er == NULL) {
pr_err("Failed to allocate memory for evsel runtime data\n");
return -1;

tools/perf/builtin-script.c

@ -273,7 +273,7 @@ static struct evsel_script *perf_evsel_script__new(struct evsel *evsel,
struct evsel_script *es = zalloc(sizeof(*es));
if (es != NULL) {
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, evsel__name(evsel)) < 0)
goto out_free;
es->fp = fopen(es->filename, "w");
if (es->fp == NULL)
@ -351,10 +351,8 @@ static const char *output_field2str(enum perf_output_field field)
#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
static int perf_evsel__do_check_stype(struct evsel *evsel,
u64 sample_type, const char *sample_msg,
enum perf_output_field field,
bool allow_user_set)
static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
enum perf_output_field field, bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->core.attr;
int type = output_type(attr->type);
@ -366,7 +364,7 @@ static int perf_evsel__do_check_stype(struct evsel *evsel,
if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
evname = perf_evsel__name(evsel);
evname = evsel__name(evsel);
pr_err("Samples for '%s' event do not have %s attribute set. "
"Cannot print '%s' field.\n",
evname, sample_msg, output_field2str(field));
@ -375,7 +373,7 @@ static int perf_evsel__do_check_stype(struct evsel *evsel,
/* user did not ask for it explicitly so remove from the default list */
output[type].fields &= ~field;
evname = perf_evsel__name(evsel);
evname = evsel__name(evsel);
pr_debug("Samples for '%s' event do not have %s attribute set. "
"Skipping '%s' field.\n",
evname, sample_msg, output_field2str(field));
@ -383,16 +381,13 @@ static int perf_evsel__do_check_stype(struct evsel *evsel,
return 0;
}
static int perf_evsel__check_stype(struct evsel *evsel,
u64 sample_type, const char *sample_msg,
enum perf_output_field field)
static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
enum perf_output_field field)
{
return perf_evsel__do_check_stype(evsel, sample_type, sample_msg, field,
false);
return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
}
static int perf_evsel__check_attr(struct evsel *evsel,
struct perf_session *session)
static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *session)
{
struct perf_event_attr *attr = &evsel->core.attr;
bool allow_user_set;
@ -404,32 +399,28 @@ static int perf_evsel__check_attr(struct evsel *evsel,
HEADER_AUXTRACE);
if (PRINT_FIELD(TRACE) &&
!perf_session__has_traces(session, "record -R"))
!perf_session__has_traces(session, "record -R"))
return -EINVAL;
if (PRINT_FIELD(IP)) {
if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP",
PERF_OUTPUT_IP))
if (evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP))
return -EINVAL;
}
if (PRINT_FIELD(ADDR) &&
perf_evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
PERF_OUTPUT_ADDR, allow_user_set))
evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", PERF_OUTPUT_ADDR, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(DATA_SRC) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC",
PERF_OUTPUT_DATA_SRC))
evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
return -EINVAL;
if (PRINT_FIELD(WEIGHT) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT, "WEIGHT",
PERF_OUTPUT_WEIGHT))
evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT, "WEIGHT", PERF_OUTPUT_WEIGHT))
return -EINVAL;
if (PRINT_FIELD(SYM) &&
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of symbols requested but neither sample IP nor "
"sample address\navailable. Hence, no addresses to convert "
"to symbols.\n");
@ -441,7 +432,7 @@ static int perf_evsel__check_attr(struct evsel *evsel,
return -EINVAL;
}
if (PRINT_FIELD(DSO) &&
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of DSO requested but no address to convert.\n");
return -EINVAL;
}
@ -458,33 +449,27 @@ static int perf_evsel__check_attr(struct evsel *evsel,
return -EINVAL;
}
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID",
PERF_OUTPUT_TID|PERF_OUTPUT_PID))
evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
return -EINVAL;
if (PRINT_FIELD(TIME) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME",
PERF_OUTPUT_TIME))
evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", PERF_OUTPUT_TIME))
return -EINVAL;
if (PRINT_FIELD(CPU) &&
perf_evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
PERF_OUTPUT_CPU, allow_user_set))
evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU", PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(IREGS) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
PERF_OUTPUT_IREGS))
evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS))
return -EINVAL;
if (PRINT_FIELD(UREGS) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS",
PERF_OUTPUT_UREGS))
evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS", PERF_OUTPUT_UREGS))
return -EINVAL;
if (PRINT_FIELD(PHYS_ADDR) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR",
PERF_OUTPUT_PHYS_ADDR))
evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR))
return -EINVAL;
return 0;
@ -604,8 +589,6 @@ static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
}
fprintf(fp, "\n");
return printed;
}
@ -1697,6 +1680,7 @@ struct perf_script {
bool show_cgroup_events;
bool allocated;
bool per_event_dump;
bool stitch_lbr;
struct evswitch evswitch;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
@ -1713,7 +1697,7 @@ static int perf_evlist__max_name_len(struct evlist *evlist)
int max = 0;
evlist__for_each_entry(evlist, evsel) {
int len = strlen(perf_evsel__name(evsel));
int len = strlen(evsel__name(evsel));
max = MAX(len, max);
}
@ -1887,7 +1871,7 @@ static void process_event(struct perf_script *script,
fprintf(fp, "%10" PRIu64 " ", sample->period);
if (PRINT_FIELD(EVNAME)) {
const char *evname = perf_evsel__name(evsel);
const char *evname = evsel__name(evsel);
if (!script->name_width)
script->name_width = perf_evlist__max_name_len(script->session->evlist);
@ -1923,6 +1907,9 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL;
if (script->stitch_lbr)
al->thread->lbr_stitch_enable = true;
if (symbol_conf.use_callchain && sample->callchain &&
thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
sample, NULL, NULL, scripting_max_stack) == 0)
@ -1946,7 +1933,7 @@ static void process_event(struct perf_script *script,
else if (PRINT_FIELD(BRSTACKOFF))
perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
perf_sample__fprintf_bpf_output(sample, fp);
perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
@ -1975,7 +1962,7 @@ static struct scripting_ops *scripting_ops;
static void __process_stat(struct evsel *counter, u64 tstamp)
{
int nthreads = perf_thread_map__nr(counter->core.threads);
int ncpus = perf_evsel__nr_cpus(counter);
int ncpus = evsel__nr_cpus(counter);
int cpu, thread;
static int header_printed;
@ -2001,7 +1988,7 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
counts->ena,
counts->run,
tstamp,
perf_evsel__name(counter));
evsel__name(counter));
}
}
}
@ -2040,7 +2027,7 @@ static int cleanup_scripting(void)
static bool filter_cpu(struct perf_sample *sample)
{
if (cpu_list)
if (cpu_list && sample->cpu != (u32)-1)
return !test_bit(sample->cpu, cpu_bitmap);
return false;
}
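
The extra sample->cpu != (u32)-1 test matters because records that carry no CPU information (for example COMM or MMAP records when sample_id_all is off) report the CPU as (u32)-1; the guard keeps -C/--cpu filtering from using that value as a bitmap index and from dropping such records. A standalone sketch of the corrected check (invented names such as cpu_is_filtered() and a fixed MAX_NR_CPUS; not the perf code itself):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MAX_NR_CPUS     2048
        #define BITS_PER_LONG   (8 * sizeof(unsigned long))

        static unsigned long cpu_bitmap[MAX_NR_CPUS / BITS_PER_LONG];

        static bool cpu_is_filtered(uint32_t cpu, bool have_cpu_list)
        {
                /* No CPU list, or no CPU info on the record: never filter it out. */
                if (!have_cpu_list || cpu == (uint32_t)-1)
                        return false;
                return !(cpu_bitmap[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)));
        }

        int main(void)
        {
                cpu_bitmap[0] = 1UL << 3;       /* pretend the user asked for -C 3 */

                printf("cpu 3  filtered? %d\n", cpu_is_filtered(3, true));             /* 0: keep */
                printf("cpu 5  filtered? %d\n", cpu_is_filtered(5, true));             /* 1: drop */
                printf("no cpu filtered? %d\n", cpu_is_filtered((uint32_t)-1, true));  /* 0: keep */
                return 0;
        }
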
@ -2138,41 +2125,59 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return err;
}
static int print_event_with_time(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine,
pid_t pid, pid_t tid, u64 timestamp)
{
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
struct thread *thread = NULL;
if (evsel && !evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = timestamp;
sample->pid = pid;
sample->tid = tid;
}
if (filter_cpu(sample))
return 0;
if (tid != -1)
thread = machine__findnew_thread(machine, pid, tid);
if (thread && evsel) {
perf_sample__fprintf_start(sample, thread, evsel,
event->header.type, stdout);
}
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}
static int print_event(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
pid_t pid, pid_t tid)
{
return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
}
static int process_comm_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
int ret = -1;
thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid);
if (thread == NULL) {
pr_debug("problem processing COMM event, skipping it.\n");
return -1;
}
if (perf_event__process_comm(tool, event, sample, machine) < 0)
goto out;
return -1;
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = 0;
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_COMM, stdout);
perf_event__fprintf(event, stdout);
}
ret = 0;
out:
thread__put(thread);
return ret;
return print_event(tool, event, sample, machine, event->comm.pid,
event->comm.tid);
}
static int process_namespaces_event(struct perf_tool *tool,
@ -2180,37 +2185,11 @@ static int process_namespaces_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
int ret = -1;
thread = machine__findnew_thread(machine, event->namespaces.pid,
event->namespaces.tid);
if (thread == NULL) {
pr_debug("problem processing NAMESPACES event, skipping it.\n");
return -1;
}
if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
goto out;
return -1;
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = 0;
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_NAMESPACES, stdout);
perf_event__fprintf(event, stdout);
}
ret = 0;
out:
thread__put(thread);
return ret;
return print_event(tool, event, sample, machine, event->namespaces.pid,
event->namespaces.tid);
}
static int process_cgroup_event(struct perf_tool *tool,
@ -2218,34 +2197,11 @@ static int process_cgroup_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
int ret = -1;
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_debug("problem processing CGROUP event, skipping it.\n");
return -1;
}
if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
goto out;
return -1;
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = 0;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_CGROUP, stdout);
perf_event__fprintf(event, stdout);
}
ret = 0;
out:
thread__put(thread);
return ret;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int process_fork_event(struct perf_tool *tool,
@ -2253,69 +2209,24 @@ static int process_fork_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (perf_event__process_fork(tool, event, sample, machine) < 0)
return -1;
thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
if (thread == NULL) {
pr_debug("problem processing FORK event, skipping it.\n");
return -1;
}
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = event->fork.time;
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_FORK, stdout);
perf_event__fprintf(event, stdout);
}
thread__put(thread);
return 0;
return print_event_with_time(tool, event, sample, machine,
event->fork.pid, event->fork.tid,
event->fork.time);
}
static int process_exit_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err = 0;
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
if (thread == NULL) {
pr_debug("problem processing EXIT event, skipping it.\n");
/* Print before 'exit' deletes anything */
if (print_event_with_time(tool, event, sample, machine, event->fork.pid,
event->fork.tid, event->fork.time))
return -1;
}
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = 0;
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_EXIT, stdout);
perf_event__fprintf(event, stdout);
}
if (perf_event__process_exit(tool, event, sample, machine) < 0)
err = -1;
thread__put(thread);
return err;
return perf_event__process_exit(tool, event, sample, machine);
}
static int process_mmap_event(struct perf_tool *tool,
@ -2323,33 +2234,11 @@ static int process_mmap_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (perf_event__process_mmap(tool, event, sample, machine) < 0)
return -1;
thread = machine__findnew_thread(machine, event->mmap.pid, event->mmap.tid);
if (thread == NULL) {
pr_debug("problem processing MMAP event, skipping it.\n");
return -1;
}
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = 0;
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_MMAP, stdout);
perf_event__fprintf(event, stdout);
}
thread__put(thread);
return 0;
return print_event(tool, event, sample, machine, event->mmap.pid,
event->mmap.tid);
}
static int process_mmap2_event(struct perf_tool *tool,
@ -2357,33 +2246,11 @@ static int process_mmap2_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
return -1;
thread = machine__findnew_thread(machine, event->mmap2.pid, event->mmap2.tid);
if (thread == NULL) {
pr_debug("problem processing MMAP2 event, skipping it.\n");
return -1;
}
if (!evsel->core.attr.sample_id_all) {
sample->cpu = 0;
sample->time = 0;
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_MMAP2, stdout);
perf_event__fprintf(event, stdout);
}
thread__put(thread);
return 0;
return print_event(tool, event, sample, machine, event->mmap2.pid,
event->mmap2.tid);
}
static int process_switch_event(struct perf_tool *tool,
@ -2391,10 +2258,7 @@ static int process_switch_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (perf_event__process_switch(tool, event, sample, machine) < 0)
return -1;
@ -2405,20 +2269,8 @@ static int process_switch_event(struct perf_tool *tool,
if (!script->show_switch_events)
return 0;
thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
if (thread == NULL) {
pr_debug("problem processing SWITCH event, skipping it.\n");
return -1;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_SWITCH, stdout);
perf_event__fprintf(event, stdout);
}
thread__put(thread);
return 0;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int
@ -2427,23 +2279,8 @@ process_lost_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
struct thread *thread;
thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
if (thread == NULL)
return -1;
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_LOST, stdout);
perf_event__fprintf(event, stdout);
}
thread__put(thread);
return 0;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static int
@ -2462,33 +2299,11 @@ process_bpf_events(struct perf_tool *tool __maybe_unused,
struct perf_sample *sample,
struct machine *machine)
{
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (machine__process_ksymbol(machine, event, sample) < 0)
return -1;
if (!evsel->core.attr.sample_id_all) {
perf_event__fprintf(event, stdout);
return 0;
}
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_debug("problem processing MMAP event, skipping it.\n");
return -1;
}
if (!filter_cpu(sample)) {
perf_sample__fprintf_start(sample, thread, evsel,
event->header.type, stdout);
perf_event__fprintf(event, stdout);
}
thread__put(thread);
return 0;
return print_event(tool, event, sample, machine, sample->pid,
sample->tid);
}
static void sig_handler(int sig __maybe_unused)
@ -3145,7 +2960,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
match = 0;
evlist__for_each_entry(session->evlist, pos) {
if (!strcmp(perf_evsel__name(pos), evname)) {
if (!strcmp(evsel__name(pos), evname)) {
match = 1;
break;
}
@ -3342,6 +3157,12 @@ static void script__setup_sample_type(struct perf_script *script)
else
callchain_param.record_mode = CALLCHAIN_FP;
}
if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
pr_warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
"Please apply --call-graph lbr when recording.\n");
script->stitch_lbr = false;
}
}
static int process_stat_round_event(struct perf_session *session,
@ -3653,6 +3474,8 @@ int cmd_script(int argc, const char **argv)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&script.evswitch),
OPT_END()
};
@ -3709,7 +3532,7 @@ int cmd_script(int argc, const char **argv)
return -1;
}
if (itrace_synth_opts.callchain &&
if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;

tools/perf/builtin-stat.c

@ -238,9 +238,8 @@ static int write_stat_round_event(u64 tm, u64 type)
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
static int
perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
struct perf_counts_values *count)
static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
struct perf_counts_values *count)
{
struct perf_sample_id *sid = SID(counter, cpu, thread);
@ -259,7 +258,7 @@ static int read_single_counter(struct evsel *counter, int cpu,
count->val = val;
return 0;
}
return perf_evsel__read_counter(counter, cpu, thread);
return evsel__read_counter(counter, cpu, thread);
}
/*
@ -284,7 +283,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
/*
* The leader's group read loads data into its group members
* (via perf_evsel__read_counter()) and sets their count->loaded.
* (via evsel__read_counter()) and sets their count->loaded.
*/
if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
read_single_counter(counter, cpu, thread, rs)) {
@ -297,7 +296,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
perf_counts__set_loaded(counter->counts, cpu, thread, false);
if (STAT_RECORD) {
if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
if (evsel__write_stat_event(counter, cpu, thread, count)) {
pr_err("failed to write stat event\n");
return -1;
}
@ -306,7 +305,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
if (verbose > 1) {
fprintf(stat_config.output,
"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
perf_evsel__name(counter),
evsel__name(counter),
cpu,
count->val, count->ena, count->run);
}
@ -359,6 +358,7 @@ static void process_interval(void)
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
perf_stat__reset_shadow_per_stat(&rt_stat);
read_counters(&rs);
if (STAT_RECORD) {
@ -409,7 +409,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
static bool perf_evsel__should_store_id(struct evsel *counter)
static bool evsel__should_store_id(struct evsel *counter)
{
return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}
@ -454,7 +454,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
errno == ENXIO) {
if (verbose > 0)
ui__warning("%s event is not supported by the kernel.\n",
perf_evsel__name(counter));
evsel__name(counter));
counter->supported = false;
/*
* errored is a sticky flag that means one of the counter's
@ -465,7 +465,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
if ((counter->leader != counter) ||
!(counter->leader->core.nr_members > 1))
return COUNTER_SKIP;
} else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
return COUNTER_RETRY;
@ -483,8 +483,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
}
}
perf_evsel__open_strerror(counter, &target,
errno, msg, sizeof(msg));
evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
if (child_pid != -1)
@ -604,7 +603,7 @@ try_again:
if (!counter->reset_group)
continue;
try_again_reset:
pr_debug2("reopening weak %s\n", perf_evsel__name(counter));
pr_debug2("reopening weak %s\n", evsel__name(counter));
if (create_perf_stat_counter(counter, &stat_config, &target,
counter->cpu_iter - 1) < 0) {
@ -635,14 +634,14 @@ try_again_reset:
if (l > stat_config.unit_width)
stat_config.unit_width = l;
if (perf_evsel__should_store_id(counter) &&
perf_evsel__store_ids(counter, evsel_list))
if (evsel__should_store_id(counter) &&
evsel__store_ids(counter, evsel_list))
return -1;
}
if (perf_evlist__apply_filters(evsel_list, &counter)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, perf_evsel__name(counter), errno,
counter->filter, evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
@ -686,8 +685,11 @@ try_again_reset:
break;
}
}
if (child_pid != -1)
if (child_pid != -1) {
if (timeout)
kill(child_pid, SIGTERM);
wait4(child_pid, &status, 0, &stat_config.ru_data);
}
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
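
With a --timeout in effect the workload is now sent SIGTERM before being reaped, and wait4() still runs unconditionally so the child's resource usage is collected and no zombie is left behind. A standalone sketch of that terminate-then-reap shape (invented names, a hard-coded 2 second window standing in for --timeout, and "sleep 30" standing in for the workload):

        #include <signal.h>
        #include <stdio.h>
        #include <sys/resource.h>
        #include <sys/types.h>
        #include <sys/wait.h>
        #include <unistd.h>

        int main(void)
        {
                struct rusage ru;
                int status;
                pid_t child = fork();

                if (child < 0)
                        return 1;
                if (child == 0) {               /* workload: runs longer than the timeout */
                        execlp("sleep", "sleep", "30", (char *)NULL);
                        _exit(127);
                }

                sleep(2);                       /* stand-in for the --timeout window */

                kill(child, SIGTERM);           /* timeout expired: stop the workload */
                wait4(child, &status, 0, &ru);  /* always reap, and keep its rusage */

                printf("child reaped, user time %ld.%06lds\n",
                       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
                return 0;
        }
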

tools/perf/builtin-timechart.c

@ -579,8 +579,8 @@ process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u32 state = perf_evsel__intval(evsel, sample, "state");
u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
u32 state = evsel__intval(evsel, sample, "state");
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
if (state == (u32)PWR_EVENT_EXIT)
c_state_end(tchart, cpu_id, sample->time);
@ -595,8 +595,8 @@ process_sample_cpu_frequency(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u32 state = perf_evsel__intval(evsel, sample, "state");
u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
u32 state = evsel__intval(evsel, sample, "state");
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
p_state_change(tchart, cpu_id, sample->time, state);
return 0;
@ -608,9 +608,9 @@ process_sample_sched_wakeup(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace)
{
u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
int waker = perf_evsel__intval(evsel, sample, "common_pid");
int wakee = perf_evsel__intval(evsel, sample, "pid");
u8 flags = evsel__intval(evsel, sample, "common_flags");
int waker = evsel__intval(evsel, sample, "common_pid");
int wakee = evsel__intval(evsel, sample, "pid");
sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
return 0;
@ -622,9 +622,9 @@ process_sample_sched_switch(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace)
{
int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
int prev_pid = evsel__intval(evsel, sample, "prev_pid");
int next_pid = evsel__intval(evsel, sample, "next_pid");
u64 prev_state = evsel__intval(evsel, sample, "prev_state");
sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
prev_state, backtrace);
@ -638,8 +638,8 @@ process_sample_power_start(struct timechart *tchart __maybe_unused,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
u64 value = perf_evsel__intval(evsel, sample, "value");
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
u64 value = evsel__intval(evsel, sample, "value");
c_state_start(cpu_id, sample->time, value);
return 0;
@ -661,8 +661,8 @@ process_sample_power_frequency(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
u64 value = perf_evsel__intval(evsel, sample, "value");
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
u64 value = evsel__intval(evsel, sample, "value");
p_state_change(tchart, cpu_id, sample->time, value);
return 0;
@ -843,7 +843,7 @@ process_enter_read(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = perf_evsel__intval(evsel, sample, "fd");
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
sample->time, fd);
}
@ -853,7 +853,7 @@ process_exit_read(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = perf_evsel__intval(evsel, sample, "ret");
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
sample->time, ret);
}
@ -863,7 +863,7 @@ process_enter_write(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = perf_evsel__intval(evsel, sample, "fd");
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
sample->time, fd);
}
@ -873,7 +873,7 @@ process_exit_write(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = perf_evsel__intval(evsel, sample, "ret");
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
sample->time, ret);
}
@ -883,7 +883,7 @@ process_enter_sync(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = perf_evsel__intval(evsel, sample, "fd");
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
sample->time, fd);
}
@ -893,7 +893,7 @@ process_exit_sync(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = perf_evsel__intval(evsel, sample, "ret");
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
sample->time, ret);
}
@ -903,7 +903,7 @@ process_enter_tx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = perf_evsel__intval(evsel, sample, "fd");
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
sample->time, fd);
}
@ -913,7 +913,7 @@ process_exit_tx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = perf_evsel__intval(evsel, sample, "ret");
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
sample->time, ret);
}
@ -923,7 +923,7 @@ process_enter_rx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = perf_evsel__intval(evsel, sample, "fd");
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
sample->time, fd);
}
@ -933,7 +933,7 @@ process_exit_rx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = perf_evsel__intval(evsel, sample, "ret");
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
sample->time, ret);
}
@ -943,7 +943,7 @@ process_enter_poll(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long fd = perf_evsel__intval(evsel, sample, "fd");
long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
sample->time, fd);
}
@ -953,7 +953,7 @@ process_exit_poll(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
long ret = perf_evsel__intval(evsel, sample, "ret");
long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
sample->time, ret);
}

tools/perf/builtin-top.c

@ -33,6 +33,7 @@
#include "util/map.h"
#include "util/mmap.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/top.h"
@ -254,7 +255,7 @@ static void perf_top__show_details(struct perf_top *top)
if (notes->src == NULL)
goto out_unlock;
printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
@ -297,8 +298,7 @@ static void perf_top__resort_hists(struct perf_top *t)
hists__collapse_resort(hists, NULL);
/* Non-group events are considered as leader */
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos)) {
if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
struct hists *leader_hists = evsel__hists(pos->leader);
hists__match(leader_hists, hists);
@ -441,7 +441,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
if (top->evlist->core.nr_entries > 1)
fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
@ -528,13 +528,13 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
fprintf(stderr, "\nAvailable events:");
evlist__for_each_entry(top->evlist, top->sym_evsel)
fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, evsel__name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->core.nr_entries) {
top->sym_evsel = evlist__first(top->evlist);
fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
sleep(1);
break;
}
@ -775,6 +775,9 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (machine__resolve(machine, &al, sample) < 0)
return;
if (top->stitch_lbr)
al.thread->lbr_stitch_enable = true;
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
@ -1042,14 +1045,13 @@ try_again:
perf_top_overwrite_fallback(top, counter))
goto try_again;
if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
perf_evsel__open_strerror(counter, &opts->target,
errno, msg, sizeof(msg));
evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out_err;
}
@ -1571,10 +1573,11 @@ int cmd_top(int argc, const char **argv)
"Sort the output by the event at the index n in group. "
"If n is invalid, sort by the first event. "
"WARNING: should be used on grouped events."),
OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
struct evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@ -1640,6 +1643,11 @@ int cmd_top(int argc, const char **argv)
}
}
if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
goto out_delete_evlist;
}
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
@ -1732,10 +1740,21 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
if (!top.record_opts.no_bpf_event)
bpf_event__add_sb_event(&sb_evlist, &perf_env);
if (!top.record_opts.no_bpf_event) {
top.sb_evlist = evlist__new();
if (perf_evlist__start_sb_thread(sb_evlist, target)) {
if (top.sb_evlist == NULL) {
pr_err("Couldn't create side band evlist.\n.");
goto out_delete_evlist;
}
if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
goto out_delete_evlist;
}
}
if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
@ -1743,7 +1762,7 @@ int cmd_top(int argc, const char **argv)
status = __cmd_top(&top);
if (!opts->no_bpf_event)
perf_evlist__stop_sb_thread(sb_evlist);
perf_evlist__stop_sb_thread(top.sb_evlist);
out_delete_evlist:
evlist__delete(top.evlist);

tools/perf/builtin-trace.c

@ -366,11 +366,9 @@ out_delete:
return NULL;
}
static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
struct tp_field *field,
const char *name)
static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
struct tep_format_field *format_field = perf_evsel__field(evsel, name);
struct tep_format_field *format_field = evsel__field(evsel, name);
if (format_field == NULL)
return -1;
@ -380,13 +378,11 @@ static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
evsel__init_tp_uint_field(evsel, &sc->name, #name); })
static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
struct tp_field *field,
const char *name)
static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
struct tep_format_field *format_field = perf_evsel__field(evsel, name);
struct tep_format_field *format_field = evsel__field(evsel, name);
if (format_field == NULL)
return -1;
@ -396,7 +392,7 @@ static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
static void evsel__delete_priv(struct evsel *evsel)
{
@ -404,13 +400,13 @@ static void evsel__delete_priv(struct evsel *evsel)
evsel__delete(evsel);
}
static int perf_evsel__init_syscall_tp(struct evsel *evsel)
static int evsel__init_syscall_tp(struct evsel *evsel)
{
struct syscall_tp *sc = evsel__syscall_tp(evsel);
if (sc != NULL) {
if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
return -ENOENT;
return 0;
}
@ -418,14 +414,14 @@ static int perf_evsel__init_syscall_tp(struct evsel *evsel)
return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
struct syscall_tp *sc = evsel__syscall_tp(evsel);
if (sc != NULL) {
struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
struct tep_format_field *syscall_id = evsel__field(tp, "id");
if (syscall_id == NULL)
syscall_id = perf_evsel__field(tp, "__syscall_nr");
syscall_id = evsel__field(tp, "__syscall_nr");
if (syscall_id == NULL ||
__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
return -EINVAL;
@ -436,21 +432,21 @@ static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evs
return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
if (evsel__syscall_tp(evsel) != NULL) {
if (perf_evsel__init_sc_tp_uint_field(evsel, id))
@ -474,7 +470,7 @@ static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *
if (IS_ERR(evsel))
return NULL;
if (perf_evsel__init_raw_syscall_tp(evsel, handler))
if (evsel__init_raw_syscall_tp(evsel, handler))
goto out_delete;
return evsel;
@ -1801,7 +1797,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
return syscall__set_arg_fmts(sc);
}
static int perf_evsel__init_tp_arg_scnprintf(struct evsel *evsel)
static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
{
struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
@@ -2074,7 +2070,7 @@ static struct syscall *trace__syscall_info(struct trace *trace,
if (verbose > 1) {
static u64 n;
fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
id, perf_evsel__name(evsel), ++n);
id, evsel__name(evsel), ++n);
}
return NULL;
}
@@ -2206,7 +2202,7 @@ static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
double ts = (double)sample->time / NSEC_PER_MSEC;
printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
perf_evsel__name(evsel), ts,
evsel__name(evsel), ts,
thread__comm_str(thread),
sample->pid, sample->tid, sample->cpu);
}
@@ -2382,7 +2378,7 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
static const char *errno_to_name(struct evsel *evsel, int err)
{
struct perf_env *env = perf_evsel__env(evsel);
struct perf_env *env = evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
return arch_syscalls__strerrno(arch_name, err);
@@ -2513,7 +2509,7 @@ errno_print: {
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
out:
ttrace->entry_pending = false;
err = 0;
@@ -2531,7 +2527,7 @@ static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
size_t filename_len, entry_str_len, to_move;
ssize_t remaining_space;
char *pos;
const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
const char *filename = evsel__rawptr(evsel, sample, "pathname");
if (!thread)
goto out;
@@ -2587,7 +2583,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
u64 runtime = evsel__intval(evsel, sample, "runtime");
double runtime_ms = (double)runtime / NSEC_PER_MSEC;
struct thread *thread = machine__findnew_thread(trace->host,
sample->pid,
@@ -2606,10 +2602,10 @@ out_put:
out_dump:
fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
evsel->name,
perf_evsel__strval(evsel, sample, "comm"),
(pid_t)perf_evsel__intval(evsel, sample, "pid"),
evsel__strval(evsel, sample, "comm"),
(pid_t)evsel__intval(evsel, sample, "pid"),
runtime,
perf_evsel__intval(evsel, sample, "vruntime"));
evsel__intval(evsel, sample, "vruntime"));
goto out_put;
}
@@ -2774,7 +2770,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
fprintf(trace->output, "%s(", evsel->name);
if (perf_evsel__is_bpf_output(evsel)) {
if (evsel__is_bpf_output(evsel)) {
bpf_output__fprintf(trace, sample);
} else if (evsel->tp_format) {
if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
@@ -2795,7 +2791,7 @@ newline:
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
++trace->nr_events_printed;
@@ -2890,7 +2886,7 @@ static int trace__pgfault(struct trace *trace,
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
++trace->nr_events_printed;
out:
@@ -3032,10 +3028,10 @@ static bool evlist__add_vfs_getname(struct evlist *evlist)
}
evlist__for_each_entry_safe(evlist, evsel, tmp) {
if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
continue;
if (perf_evsel__field(evsel, "pathname")) {
if (evsel__field(evsel, "pathname")) {
evsel->handler = trace__vfs_getname;
found = true;
continue;
@@ -3093,7 +3089,7 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
sample->raw_data == NULL) {
fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
perf_evsel__name(evsel), sample->tid,
evsel__name(evsel), sample->tid,
sample->cpu, sample->raw_size);
} else {
tracepoint_handler handler = evsel->handler;
@@ -3124,8 +3120,8 @@ static int trace__add_syscall_newtp(struct trace *trace)
if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
goto out_delete_sys_exit;
perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
evlist__add(evlist, sys_enter);
evlist__add(evlist, sys_exit);
@@ -3164,10 +3160,9 @@ static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
if (filter == NULL)
goto out_enomem;
if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
filter)) {
if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
sys_exit = trace->syscalls.events.sys_exit;
err = perf_evsel__append_tp_filter(sys_exit, filter);
err = evsel__append_tp_filter(sys_exit, filter);
}
free(filter);
@@ -3695,7 +3690,7 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
return __trace__deliver_event(trace, event->event);
}
static struct syscall_arg_fmt *perf_evsel__syscall_arg_fmt(struct evsel *evsel, char *arg)
static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
{
struct tep_format_field *field;
struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
@@ -3750,7 +3745,7 @@ static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel
scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
fmt = perf_evsel__syscall_arg_fmt(evsel, arg);
fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
if (fmt == NULL) {
pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
arg, evsel->name, evsel->filter);
@@ -3801,7 +3796,7 @@ static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel
if (new_filter != evsel->filter) {
pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
perf_evsel__set_filter(evsel, new_filter);
evsel__set_filter(evsel, new_filter);
free(new_filter);
}
@@ -3849,7 +3844,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
evlist__add(evlist, pgfault_maj);
}
@@ -3857,7 +3852,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
evlist__add(evlist, pgfault_min);
}
@@ -4108,7 +4103,7 @@ out_error:
out_error_apply_filters:
fprintf(trace->output,
"Failed to set filter \"%s\" on event %s with %d (%s)\n",
evsel->filter, perf_evsel__name(evsel), errno,
evsel->filter, evsel__name(evsel), errno,
str_error_r(errno, errbuf, sizeof(errbuf)));
goto out_delete_evlist;
}
@@ -4179,7 +4174,7 @@ static int trace__replay(struct trace *trace)
"syscalls:sys_enter");
if (evsel &&
(perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
(evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
pr_err("Error during initialize raw_syscalls:sys_enter event\n");
goto out;
@@ -4191,7 +4186,7 @@ static int trace__replay(struct trace *trace)
evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
"syscalls:sys_exit");
if (evsel &&
(perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
(evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
pr_err("Error during initialize raw_syscalls:sys_exit event\n");
goto out;
@@ -4471,11 +4466,11 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist)
continue;
if (strcmp(evsel->tp_format->system, "syscalls")) {
perf_evsel__init_tp_arg_scnprintf(evsel);
evsel__init_tp_arg_scnprintf(evsel);
continue;
}
if (perf_evsel__init_syscall_tp(evsel))
if (evsel__init_syscall_tp(evsel))
return -1;
if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
@@ -4989,7 +4984,7 @@ int cmd_trace(int argc, const char **argv)
*/
if (trace.syscalls.events.augmented) {
evlist__for_each_entry(trace.evlist, evsel) {
bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
if (raw_syscalls_sys_exit) {
trace.raw_augmented_syscalls = true;
@@ -4997,10 +4992,10 @@ int cmd_trace(int argc, const char **argv)
}
if (trace.syscalls.events.augmented->priv == NULL &&
strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
strstr(evsel__name(evsel), "syscalls:sys_enter")) {
struct evsel *augmented = trace.syscalls.events.augmented;
if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(augmented))
if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
evsel__init_augmented_syscall_tp_args(augmented))
goto out;
/*
* Augmented is __augmented_syscalls__ BPF_OUTPUT event
@@ -5014,16 +5009,16 @@ int cmd_trace(int argc, const char **argv)
* as not to filter it, then we'll handle it just like we would
* for the BPF_OUTPUT one:
*/
if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(evsel))
if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
evsel__init_augmented_syscall_tp_args(evsel))
goto out;
evsel->handler = trace__sys_enter;
}
if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
struct syscall_tp *sc;
init_augmented_syscall_tp:
if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
if (evsel__init_augmented_syscall_tp(evsel, evsel))
goto out;
sc = __evsel__syscall_tp(evsel);
/*
@@ -5047,7 +5042,7 @@ init_augmented_syscall_tp:
*/
if (trace.raw_augmented_syscalls)
trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
perf_evsel__init_augmented_syscall_tp_ret(evsel);
evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
}
}


@@ -258,7 +258,8 @@ gets schedule to. Per task counters can be created by any user, for
their own tasks.
A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege.
all events on CPU-x. Per CPU counters need CAP_PERFMON or CAP_SYS_ADMIN
privilege.
The 'flags' parameter is currently unused and must be zero.
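As an illustration only (not part of this diff), here is a minimal sketch of opening such a 'pid == -1, cpu == x' counter, which per the text above needs CAP_PERFMON or CAP_SYS_ADMIN; the helper name open_cpu_cycles_counter is made up for this example:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hypothetical helper: count cycles for all tasks on one CPU. */
	static int open_cpu_cycles_counter(int cpu)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* pid == -1, cpu == cpu: all events on that CPU; flags must be 0. */
		return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	}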


@@ -0,0 +1,19 @@
[
{
"MetricExpr": "(hv_24x7@PM_MCS01_128B_RD_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS01_128B_RD_DISP_PORT23\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_RD_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_RD_DISP_PORT23\\,chip\\=?@)",
"MetricName": "Memory_RD_BW_Chip",
"MetricGroup": "Memory_BW",
"ScaleUnit": "1.6e-2MB"
},
{
"MetricExpr": "(hv_24x7@PM_MCS01_128B_WR_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS01_128B_WR_DISP_PORT23\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_WR_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_WR_DISP_PORT23\\,chip\\=?@ )",
"MetricName": "Memory_WR_BW_Chip",
"MetricGroup": "Memory_BW",
"ScaleUnit": "1.6e-2MB"
},
{
"MetricExpr": "(hv_24x7@PM_PB_CYC\\,chip\\=?@ )",
"MetricName": "PowerBUS_Frequency",
"ScaleUnit": "2.5e-7GHz"
}
]


@@ -26,7 +26,7 @@ struct pmu_event {
* Map a CPU to its table of PMU events. The CPU is identified by the
* cpuid field, which is an arch-specific identifier for the CPU.
* The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
* must match the get_cpustr() in tools/perf/arch/xxx/util/header.c)
* must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
*
* The cpuid can contain any character other than the comma.
*/
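For context, a simplified sketch of how that mapping is typically consumed; this is illustrative code, not part of this diff, and the real lookup in tools/perf/util/pmu.c also allows the mapfile's cpuid field to be a regular expression, whereas plain strcmp() is used here for brevity:

	#include <string.h>
	#include "pmu-events/pmu-events.h"	/* struct pmu_events_map, pmu_events_map[] */

	/* Illustrative only: return the event table whose cpuid matches. */
	static struct pmu_event *find_events_table(const char *cpuid)
	{
		int i;

		for (i = 0; pmu_events_map[i].table != NULL; i++) {
			if (!strcmp(pmu_events_map[i].cpuid, cpuid))
				return pmu_events_map[i].table;
		}
		return NULL;	/* no event table generated for this CPU model */
	}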


@@ -0,0 +1,2 @@
#!/bin/bash
perf record -g "$@"


@@ -0,0 +1,3 @@
#!/bin/bash
# description: create flame graphs
perf script -s "$PERF_EXEC_PATH"/scripts/python/flamegraph.py -- "$@"


@@ -0,0 +1,124 @@
# flamegraph.py - create flame graphs from perf samples
# SPDX-License-Identifier: GPL-2.0
#
# Usage:
#
# perf record -a -g -F 99 sleep 60
# perf script report flamegraph
#
# Combined:
#
# perf script flamegraph -a -F 99 sleep 60
#
# Written by Andreas Gerstmayr <agerstmayr@redhat.com>
# Flame Graphs invented by Brendan Gregg <bgregg@netflix.com>
# Works in tandem with d3-flame-graph by Martin Spier <mspier@netflix.com>
from __future__ import print_function
import sys
import os
import argparse
import json
class Node:
def __init__(self, name, libtype=""):
self.name = name
self.libtype = libtype
self.value = 0
self.children = []
def toJSON(self):
return {
"n": self.name,
"l": self.libtype,
"v": self.value,
"c": self.children
}
class FlameGraphCLI:
def __init__(self, args):
self.args = args
self.stack = Node("root")
if self.args.format == "html" and \
not os.path.isfile(self.args.template):
print("Flame Graph template {} does not exist. Please install "
"the js-d3-flame-graph (RPM) or libjs-d3-flame-graph (deb) "
"package, specify an existing flame graph template "
"(--template PATH) or another output format "
"(--format FORMAT).".format(self.args.template),
file=sys.stderr)
sys.exit(1)
def find_or_create_node(self, node, name, dso):
libtype = "kernel" if dso == "[kernel.kallsyms]" else ""
if name is None:
name = "[unknown]"
for child in node.children:
if child.name == name and child.libtype == libtype:
return child
child = Node(name, libtype)
node.children.append(child)
return child
def process_event(self, event):
node = self.find_or_create_node(self.stack, event["comm"], None)
if "callchain" in event:
for entry in reversed(event['callchain']):
node = self.find_or_create_node(
node, entry.get("sym", {}).get("name"), event.get("dso"))
else:
node = self.find_or_create_node(
node, entry.get("symbol"), event.get("dso"))
node.value += 1
def trace_end(self):
json_str = json.dumps(self.stack, default=lambda x: x.toJSON())
if self.args.format == "html":
try:
with open(self.args.template) as f:
output_str = f.read().replace("/** @flamegraph_json **/",
json_str)
except IOError as e:
print("Error reading template file: {}".format(e), file=sys.stderr)
sys.exit(1)
output_fn = self.args.output or "flamegraph.html"
else:
output_str = json_str
output_fn = self.args.output or "stacks.json"
if output_fn == "-":
sys.stdout.write(output_str)
else:
print("dumping data to {}".format(output_fn))
try:
with open(output_fn, "w") as out:
out.write(output_str)
except IOError as e:
print("Error writing output file: {}".format(e), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create flame graphs.")
parser.add_argument("-f", "--format",
default="html", choices=["json", "html"],
help="output file format")
parser.add_argument("-o", "--output",
help="output file name")
parser.add_argument("--template",
default="/usr/share/d3-flame-graph/d3-flamegraph-base.html",
help="path to flamegraph HTML template")
parser.add_argument("-i", "--input",
help=argparse.SUPPRESS)
args = parser.parse_args()
cli = FlameGraphCLI(args)
process_event = cli.process_event
trace_end = cli.trace_end


@@ -56,6 +56,7 @@ perf-y += mem2node.o
perf-y += maps.o
perf-y += time-utils-test.o
perf-y += genelf.o
perf-y += api-io.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)

tools/perf/tests/api-io.c

@@ -0,0 +1,304 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "tests.h"
#include <api/io.h>
#include <linux/kernel.h>
#define TEMPL "/tmp/perf-test-XXXXXX"
#define EXPECT_EQUAL(val, expected) \
do { \
if (val != expected) { \
pr_debug("%s:%d: %d != %d\n", \
__FILE__, __LINE__, val, expected); \
ret = -1; \
} \
} while (0)
#define EXPECT_EQUAL64(val, expected) \
do { \
if (val != expected) { \
pr_debug("%s:%d: %lld != %lld\n", \
__FILE__, __LINE__, val, expected); \
ret = -1; \
} \
} while (0)
static int make_test_file(char path[PATH_MAX], const char *contents)
{
ssize_t contents_len = strlen(contents);
int fd;
strcpy(path, TEMPL);
fd = mkstemp(path);
if (fd < 0) {
pr_debug("mkstemp failed");
return -1;
}
if (write(fd, contents, contents_len) < contents_len) {
pr_debug("short write");
close(fd);
unlink(path);
return -1;
}
close(fd);
return 0;
}
static int setup_test(char path[PATH_MAX], const char *contents,
size_t buf_size, struct io *io)
{
if (make_test_file(path, contents))
return -1;
io->fd = open(path, O_RDONLY);
if (io->fd < 0) {
pr_debug("Failed to open '%s'\n", path);
unlink(path);
return -1;
}
io->buf = malloc(buf_size);
if (io->buf == NULL) {
pr_debug("Failed to allocate memory");
close(io->fd);
unlink(path);
return -1;
}
io__init(io, io->fd, io->buf, buf_size);
return 0;
}
static void cleanup_test(char path[PATH_MAX], struct io *io)
{
free(io->buf);
close(io->fd);
unlink(path);
}
static int do_test_get_char(const char *test_string, size_t buf_size)
{
char path[PATH_MAX];
struct io io;
int ch, ret = 0;
size_t i;
if (setup_test(path, test_string, buf_size, &io))
return -1;
for (i = 0; i < strlen(test_string); i++) {
ch = io__get_char(&io);
EXPECT_EQUAL(ch, test_string[i]);
EXPECT_EQUAL(io.eof, false);
}
ch = io__get_char(&io);
EXPECT_EQUAL(ch, -1);
EXPECT_EQUAL(io.eof, true);
cleanup_test(path, &io);
return ret;
}
static int test_get_char(void)
{
int i, ret = 0;
size_t j;
static const char *const test_strings[] = {
"12345678abcdef90",
"a\nb\nc\nd\n",
"\a\b\t\v\f\r",
};
for (i = 0; i <= 10; i++) {
for (j = 0; j < ARRAY_SIZE(test_strings); j++) {
if (do_test_get_char(test_strings[j], 1 << i))
ret = -1;
}
}
return ret;
}
static int do_test_get_hex(const char *test_string,
__u64 val1, int ch1,
__u64 val2, int ch2,
__u64 val3, int ch3,
bool end_eof)
{
char path[PATH_MAX];
struct io io;
int ch, ret = 0;
__u64 hex;
if (setup_test(path, test_string, 4, &io))
return -1;
ch = io__get_hex(&io, &hex);
EXPECT_EQUAL64(hex, val1);
EXPECT_EQUAL(ch, ch1);
ch = io__get_hex(&io, &hex);
EXPECT_EQUAL64(hex, val2);
EXPECT_EQUAL(ch, ch2);
ch = io__get_hex(&io, &hex);
EXPECT_EQUAL64(hex, val3);
EXPECT_EQUAL(ch, ch3);
EXPECT_EQUAL(io.eof, end_eof);
cleanup_test(path, &io);
return ret;
}
static int test_get_hex(void)
{
int ret = 0;
if (do_test_get_hex("12345678abcdef90",
0x12345678abcdef90, -1,
0, -1,
0, -1,
true))
ret = -1;
if (do_test_get_hex("1\n2\n3\n",
1, '\n',
2, '\n',
3, '\n',
false))
ret = -1;
if (do_test_get_hex("12345678ABCDEF90;a;b",
0x12345678abcdef90, ';',
0xa, ';',
0xb, -1,
true))
ret = -1;
if (do_test_get_hex("0x1x2x",
0, 'x',
1, 'x',
2, 'x',
false))
ret = -1;
if (do_test_get_hex("x1x",
0, -2,
1, 'x',
0, -1,
true))
ret = -1;
if (do_test_get_hex("10000000000000000000000000000abcdefgh99i",
0xabcdef, 'g',
0, -2,
0x99, 'i',
false))
ret = -1;
return ret;
}
static int do_test_get_dec(const char *test_string,
__u64 val1, int ch1,
__u64 val2, int ch2,
__u64 val3, int ch3,
bool end_eof)
{
char path[PATH_MAX];
struct io io;
int ch, ret = 0;
__u64 dec;
if (setup_test(path, test_string, 4, &io))
return -1;
ch = io__get_dec(&io, &dec);
EXPECT_EQUAL64(dec, val1);
EXPECT_EQUAL(ch, ch1);
ch = io__get_dec(&io, &dec);
EXPECT_EQUAL64(dec, val2);
EXPECT_EQUAL(ch, ch2);
ch = io__get_dec(&io, &dec);
EXPECT_EQUAL64(dec, val3);
EXPECT_EQUAL(ch, ch3);
EXPECT_EQUAL(io.eof, end_eof);
cleanup_test(path, &io);
return ret;
}
static int test_get_dec(void)
{
int ret = 0;
if (do_test_get_dec("12345678abcdef90",
12345678, 'a',
0, -2,
0, -2,
false))
ret = -1;
if (do_test_get_dec("1\n2\n3\n",
1, '\n',
2, '\n',
3, '\n',
false))
ret = -1;
if (do_test_get_dec("12345678;1;2",
12345678, ';',
1, ';',
2, -1,
true))
ret = -1;
if (do_test_get_dec("0x1x2x",
0, 'x',
1, 'x',
2, 'x',
false))
ret = -1;
if (do_test_get_dec("x1x",
0, -2,
1, 'x',
0, -1,
true))
ret = -1;
if (do_test_get_dec("10000000000000000000000000000000000000000000000000000000000123456789ab99c",
123456789, 'a',
0, -2,
99, 'c',
false))
ret = -1;
return ret;
}
int test__api_io(struct test *test __maybe_unused,
int subtest __maybe_unused)
{
int ret = 0;
if (test_get_char())
ret = TEST_FAIL;
if (test_get_hex())
ret = TEST_FAIL;
if (test_get_dec())
ret = TEST_FAIL;
return ret;
}
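As a usage illustration (not part of this diff) of the io API these tests exercise, a small sketch that reads the first symbol address from /proc/kallsyms; io__get_hex() returns the character that terminated the hex number, which for a kallsyms line is the space before the symbol type:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/types.h>
	#include <api/io.h>

	int main(void)
	{
		char buf[128];
		struct io io;
		__u64 addr;
		int fd = open("/proc/kallsyms", O_RDONLY);

		if (fd < 0)
			return 1;
		io__init(&io, fd, buf, sizeof(buf));
		/* Hex address is terminated by the space before the symbol type. */
		if (io__get_hex(&io, &addr) == ' ')
			printf("first kallsyms address: %llx\n", (unsigned long long)addr);
		close(fd);
		return 0;
	}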


@@ -309,6 +309,10 @@ static struct test generic_tests[] = {
.desc = "Test jit_write_elf",
.func = test__jit_write_elf,
},
{
.desc = "Test api io",
.func = test__api_io,
},
{
.desc = "maps__merge_in",
.func = test__maps__merge_in,

Some files were not shown because too many files have changed in this diff.