linux/arch/arm64/kernel/perf_event.c


/*
* PMU support
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
*
* This code is based heavily on the ARMv7 perf event code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/*
* ARMv8 PMUv3 Performance Events handling code.
* Common event types (some are defined in asm/perf_event.h).
*/
/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21
/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30
/* ARMv8 recommended implementation-defined event types. */
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
/* PMUv3 HW events mapping. */
/*
 * ARMv8 architecturally defined events. Not all of these may be
 * supported on any given implementation; events that are undefined
 * or unsupported will be disabled at run-time.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
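/*
 * With the map above, a generic request such as "perf stat -e instructions"
 * resolves to INST_RETIRED (0x08), while generic events left at
 * PERF_MAP_ALL_UNSUPPORTED simply fail to open rather than silently
 * counting something else.
 */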
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
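/*
 * Each attribute defined below is surfaced through the PMU's "events"
 * directory in sysfs. Reading an entry such as .../events/inst_retired
 * is expected to print "event=0x008" (given the 0x%03llx format above),
 * which perf tooling feeds back through the "event" config field
 * declared further down.
 */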
#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
config, armv8pmu_events_sysfs_show)
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
static struct attribute *armv8_pmuv3_event_attrs[] = {
&armv8_event_attr_sw_incr.attr.attr,
&armv8_event_attr_l1i_cache_refill.attr.attr,
&armv8_event_attr_l1i_tlb_refill.attr.attr,
&armv8_event_attr_l1d_cache_refill.attr.attr,
&armv8_event_attr_l1d_cache.attr.attr,
&armv8_event_attr_l1d_tlb_refill.attr.attr,
&armv8_event_attr_ld_retired.attr.attr,
&armv8_event_attr_st_retired.attr.attr,
&armv8_event_attr_inst_retired.attr.attr,
&armv8_event_attr_exc_taken.attr.attr,
&armv8_event_attr_exc_return.attr.attr,
&armv8_event_attr_cid_write_retired.attr.attr,
&armv8_event_attr_pc_write_retired.attr.attr,
&armv8_event_attr_br_immed_retired.attr.attr,
&armv8_event_attr_br_return_retired.attr.attr,
&armv8_event_attr_unaligned_ldst_retired.attr.attr,
&armv8_event_attr_br_mis_pred.attr.attr,
&armv8_event_attr_cpu_cycles.attr.attr,
&armv8_event_attr_br_pred.attr.attr,
&armv8_event_attr_mem_access.attr.attr,
&armv8_event_attr_l1i_cache.attr.attr,
&armv8_event_attr_l1d_cache_wb.attr.attr,
&armv8_event_attr_l2d_cache.attr.attr,
&armv8_event_attr_l2d_cache_refill.attr.attr,
&armv8_event_attr_l2d_cache_wb.attr.attr,
&armv8_event_attr_bus_access.attr.attr,
&armv8_event_attr_memory_error.attr.attr,
&armv8_event_attr_inst_spec.attr.attr,
&armv8_event_attr_ttbr_write_retired.attr.attr,
&armv8_event_attr_bus_cycles.attr.attr,
&armv8_event_attr_l1d_cache_allocate.attr.attr,
&armv8_event_attr_l2d_cache_allocate.attr.attr,
&armv8_event_attr_br_retired.attr.attr,
&armv8_event_attr_br_mis_pred_retired.attr.attr,
&armv8_event_attr_stall_frontend.attr.attr,
&armv8_event_attr_stall_backend.attr.attr,
&armv8_event_attr_l1d_tlb.attr.attr,
&armv8_event_attr_l1i_tlb.attr.attr,
&armv8_event_attr_l2i_cache.attr.attr,
&armv8_event_attr_l2i_cache_refill.attr.attr,
&armv8_event_attr_l3d_cache_allocate.attr.attr,
&armv8_event_attr_l3d_cache_refill.attr.attr,
&armv8_event_attr_l3d_cache.attr.attr,
&armv8_event_attr_l3d_cache_wb.attr.attr,
&armv8_event_attr_l2d_tlb_refill.attr.attr,
&armv8_event_attr_l2i_tlb_refill.attr.attr,
&armv8_event_attr_l2d_tlb.attr.attr,
&armv8_event_attr_l2i_tlb.attr.attr,
NULL,
};
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
return attr->mode;
return 0;
}
static struct attribute_group armv8_pmuv3_events_attr_group = {
.name = "events",
.attrs = armv8_pmuv3_event_attrs,
.is_visible = armv8pmu_event_attr_is_visible,
};
PMU_FORMAT_ATTR(event, "config:0-15");
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group armv8_pmuv3_format_attr_group = {
.name = "format",
.attrs = armv8_pmuv3_format_attrs,
};
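/*
 * The "config:0-15" string tells perf that the raw event number occupies
 * the low 16 bits of perf_event_attr::config, matching the 16-bit PMUv3
 * event space. Something like "perf stat -e armv8_pmuv3/event=0x11/"
 * (the PMU instance name varies by platform) should therefore count
 * CPU_CYCLES by raw event number.
 */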
/*
* Perf Events' indices
*/
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
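/*
 * As a worked example: on an implementation advertising six event
 * counters, num_events is seven (six events plus the cycle counter),
 * index 0 is the dedicated cycle counter, indices 1..6 are the event
 * counters, and ARMV8_IDX_COUNTER_LAST() evaluates to 6.
 */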
/*
* ARMv8 low level PMU access
*/
/*
* Perf Event to low level counters mapping
*/
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
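/*
 * Note the deliberate wrap-around: ARMV8_IDX_TO_COUNTER(1) is hardware
 * counter 0, while ARMV8_IDX_TO_COUNTER(ARMV8_IDX_CYCLE_COUNTER) is
 * (0 - 1) masked down to 31, which is the bit position the architecture
 * uses for the cycle counter in PMCNTENSET/PMCNTENCLR/PMINTENSET and
 * friends. The enable/disable helpers below rely on this to treat the
 * cycle counter uniformly.
 */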
static inline u32 armv8pmu_pmcr_read(void)
{
return read_sysreg(pmcr_el0);
}
static inline void armv8pmu_pmcr_write(u32 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
write_sysreg(val, pmcr_el0);
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
return idx >= ARMV8_IDX_CYCLE_COUNTER &&
idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
static inline int armv8pmu_select_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(counter, pmselr_el0);
isb();
return idx;
}
static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u32 value = 0;
if (!armv8pmu_counter_valid(cpu_pmu, idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
else if (armv8pmu_select_counter(idx) == idx)
value = read_sysreg(pmxevcntr_el0);
return value;
}
static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (!armv8pmu_counter_valid(cpu_pmu, idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * Set the upper 32 bits, as this is a 64-bit counter, but we
		 * only count using the lower 32 bits and we want an interrupt
		 * when the 32-bit half overflows.
		 */
u64 value64 = 0xffffffff00000000ULL | value;
write_sysreg(value64, pmccntr_el0);
} else if (armv8pmu_select_counter(idx) == idx)
write_sysreg(value, pmxevcntr_el0);
}
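/*
 * Worked example of the cheat above: writing 0xffffff00 leaves
 * pmccntr_el0 at 0xffffffffffffff00, so with PMCR_EL0.LC set the 64-bit
 * counter overflows (raising the interrupt) after 0x100 increments,
 * exactly as a true 32-bit counter seeded with 0xffffff00 would.
 */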
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
if (armv8pmu_select_counter(idx) == idx) {
val &= ARMV8_PMU_EVTYPE_MASK;
write_sysreg(val, pmxevtyper_el0);
}
}
static inline int armv8pmu_enable_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(BIT(counter), pmcntenset_el0);
return idx;
}
static inline int armv8pmu_disable_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(BIT(counter), pmcntenclr_el0);
return idx;
}
static inline int armv8pmu_enable_intens(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(BIT(counter), pmintenset_el1);
return idx;
}
static inline int armv8pmu_disable_intens(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(BIT(counter), pmintenclr_el1);
isb();
/* Clear the overflow flag in case an interrupt is pending. */
write_sysreg(BIT(counter), pmovsclr_el0);
isb();
return idx;
}
static inline u32 armv8pmu_getreset_flags(void)
{
u32 value;
/* Read */
value = read_sysreg(pmovsclr_el0);
/* Write to clear flags */
value &= ARMV8_PMU_OVSR_MASK;
write_sysreg(value, pmovsclr_el0);
return value;
}
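/*
 * pmovsclr_el0 is write-one-to-clear: writing back exactly the bits that
 * were read acknowledges those overflows, while any overflow that lands
 * between the read and the write stays pending for the next interrupt.
 */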
static void armv8pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
*/
armv8pmu_disable_counter(idx);
/*
* Set event (if destined for PMNx counters).
*/
armv8pmu_write_evtype(idx, hwc->config_base);
/*
* Enable interrupt for this counter
*/
armv8pmu_enable_intens(idx);
/*
* Enable counter
*/
armv8pmu_enable_counter(idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
/*
* Disable counter and interrupt
*/
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
*/
armv8pmu_disable_counter(idx);
/*
* Disable interrupt for this counter
*/
armv8pmu_disable_intens(idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
u32 pmovsr;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmovsr = armv8pmu_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv8pmu_has_overflowed(pmovsr))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
/* Ignore if we don't have an event. */
if (!event)
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
int idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
	/* Always prefer to place a cycle-counting event on the dedicated cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return ARMV8_IDX_CYCLE_COUNTER;
}
	/*
	 * Otherwise use the event counters
	 */
for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
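/*
 * Returning -EAGAIN rather than a hard error lets the core perf layer
 * rotate and multiplex events across the counters instead of rejecting
 * the event outright.
 */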
/*
 * Add an event filter to a given event. This relies on the PMU's
 * EL filtering support (the include/exclude bits in the event type
 * register).
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
if (attr->exclude_idle)
return -EPERM;
/*
* If we're running in hyp mode, then we *are* the hypervisor.
* Therefore we ignore exclude_hv in this configuration, since
* there's no hypervisor to sample anyway. This is consistent
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
if (!attr->exclude_kernel)
config_base |= ARMV8_PMU_INCLUDE_EL2;
} else {
if (attr->exclude_kernel)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (!attr->exclude_hv)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
/*
* Install the filter into config_base as this is used to
* construct the event type.
*/
event->config_base = config_base;
return 0;
}
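/*
 * For example (outside hyp mode), an event opened with
 * attr.exclude_kernel = 1 and attr.exclude_hv = 1 ends up with just
 * ARMV8_PMU_EXCLUDE_EL1 in config_base; armv8pmu_enable_event() later
 * writes that, together with the event number, into the counter's event
 * type register.
 */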
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
u32 idx, nb_cnt = cpu_pmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
armv8pmu_disable_counter(idx);
armv8pmu_disable_intens(idx);
}
	/*
	 * Initialize & Reset PMNC. Request an overflow interrupt for the
	 * 64-bit cycle counter, but cheat in armv8pmu_write_counter().
	 */
armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
ARMV8_PMU_PMCR_LC);
}
static int __armv8_pmuv3_map_event(struct perf_event *event,
const unsigned (*extra_event_map)
[PERF_COUNT_HW_MAX],
const unsigned (*extra_cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX])
{
int hw_event_id;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
	/* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
return hw_event_id;
}
return armpmu_map_event(event, extra_event_map, extra_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
}
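/*
 * In other words: the common PMUv3 tables are consulted first, and an
 * architectural event ID is only used when the PMCEID registers confirm
 * the CPU implements it; everything else falls through to the
 * micro-architecture specific tables supplied by the caller, which may
 * be NULL as in armv8_pmuv3_map_event() below.
 */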
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, NULL);
}
static int armv8_a53_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}
static int armv8_a57_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}
static int armv8_a73_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}
static int armv8_thunder_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL,
&armv8_thunder_perf_cache_map);
}
static int armv8_vulcan_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL,
&armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
struct arm_pmu *pmu;
bool present;
};
static void __armv8pmu_probe_pmu(void *info)
{
struct armv8pmu_probe_info *probe = info;
struct arm_pmu *cpu_pmu = probe->pmu;
u64 dfr0;
u32 pmceid[2];
int pmuver;
dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_signed_field(dfr0,
ID_AA64DFR0_PMUVER_SHIFT);
if (pmuver < 1)
return;
probe->present = true;
	/* Read the number of CNTx counters supported from PMNC */
cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
& ARMV8_PMU_PMCR_N_MASK;
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
pmceid[0] = read_sysreg(pmceid0_el0);
pmceid[1] = read_sysreg(pmceid1_el0);
bitmap_from_u32array(cpu_pmu->pmceid_bitmap,
ARMV8_PMUV3_MAX_COMMON_EVENTS, pmceid,
ARRAY_SIZE(pmceid));
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
struct armv8pmu_probe_info probe = {
.pmu = cpu_pmu,
.present = false,
};
int ret;
ret = smp_call_function_any(&cpu_pmu->supported_cpus,
__armv8pmu_probe_pmu,
&probe, 1);
if (ret)
return ret;
return probe.present ? 0 : -ENODEV;
}
static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8pmu_probe_pmu(cpu_pmu);
if (ret)
return ret;
	cpu_pmu->handle_irq = armv8pmu_handle_irq;
	cpu_pmu->enable = armv8pmu_enable_event;
	cpu_pmu->disable = armv8pmu_disable_event;
	cpu_pmu->read_counter = armv8pmu_read_counter;
	cpu_pmu->write_counter = armv8pmu_write_counter;
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
	cpu_pmu->start = armv8pmu_start;
	cpu_pmu->stop = armv8pmu_stop;
	cpu_pmu->reset = armv8pmu_reset;
	cpu_pmu->max_period = (1LLU << 32) - 1;
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
return 0;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_pmuv3";
cpu_pmu->map_event = armv8_pmuv3_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_cortex_a35";
cpu_pmu->map_event = armv8_a53_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_cortex_a53";
cpu_pmu->map_event = armv8_a53_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_cortex_a57";
cpu_pmu->map_event = armv8_a57_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_cortex_a72";
cpu_pmu->map_event = armv8_a57_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_cortex_a73";
cpu_pmu->map_event = armv8_a73_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_cavium_thunder";
cpu_pmu->map_event = armv8_thunder_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
if (ret)
return ret;
cpu_pmu->name = "armv8_brcm_vulcan";
cpu_pmu->map_event = armv8_vulcan_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
return 0;
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
{},
};
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = ARMV8_PMU_PDEV_NAME,
.of_match_table = armv8_pmu_of_device_ids,
},
.probe = armv8_pmu_device_probe,
};
static int __init armv8_pmu_driver_init(void)
{
if (acpi_disabled)
return platform_driver_register(&armv8_pmu_driver);
else
return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)