Perf changes for v6.4:

 - Add Intel Granite Rapids support
 - Add uncore events for Intel SPR IMC PMU
 - Fix perf IRQ throttling bug

Signed-off-by: Ingo Molnar <mingo@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmRK3GoRHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1hqghAAwZyNLY8oAu/izkp5DKML7okLa48nh1+K
JWsM4GT16ldx6MBqxX4V7tiYfAeo6ydCkd/LbxPklAk4Fmcwt5HWotOEGHab7N7B
iTOix481TIa+E7ERuDLU5vS0chtdE5CrVfmBwtkI4WEv0c8vBwQHvRzy6RaoGG+2
XSqwFhkDG7l6N6rIz/V8zJKFo0hNPou0bllYJMM+A+BRdOthKhgXNjM6yuuKXgst
TWxByDCoAG59uWvzq3WmKRLk/bJ4VC2lRDFpo01xtPvn1hA/Bs8MDF1940NG8uEh
u7aTI1ZyaJJnnxaTk98r/aZIMYlHHWIIKnymtVFT52ZLeKARDt6MmX5ttec16Cd1
a5IXuiWPkiSfGlmN7Q4sK89eg606WQ/i+eIKfNMG0ZruvKFiT0uD+gNUlQvE/4fk
6OgJot/iF2B5+UWcBmEOMYV4jagSWLFkIQ3vGVT6E3Zp8gEa1/R3ek1aSbQeb8Gi
kvMjcwthUNOgGIXrFKbdDTN/3seoNmrgIP2wetUzXhVaAOIYz6mpxIcXLc+vNiDh
soTRtkLgNTkrVQ8AKp7snKNRLjazL6CqUd+RILH4tM+bshFnyeH6K4Eck+Fo//Dn
XtEHry6nJ0ZPhsJPRwFHOoYCAuDQdy5DG7QQ07qXYW9DyJV7ZmefDJYOEjgcip6G
f3/sUe7ekRA=
=41Sa
-----END PGP SIGNATURE-----

Merge tag 'perf-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:

 - Add Intel Granite Rapids support
 - Add uncore events for Intel SPR IMC PMU
 - Fix perf IRQ throttling bug

* tag 'perf-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Add events for Intel SPR IMC PMU
  perf/core: Fix hardlockup failure caused by perf throttle
  perf/x86/cstate: Add Granite Rapids support
  perf/x86/msr: Add Granite Rapids
  perf/x86/intel: Add Granite Rapids
commit 7c339778f9
arch/x86/events/intel/core.c

@@ -5469,6 +5469,15 @@ pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 	return x86_pmu.pebs ? attr->mode : 0;
 }
 
+static umode_t
+mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+	if (attr == &event_attr_mem_ld_aux.attr.attr)
+		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
+
+	return pebs_is_visible(kobj, attr, i);
+}
+
 static umode_t
 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 {
@@ -5496,7 +5505,7 @@ static struct attribute_group group_events_td = {
 
 static struct attribute_group group_events_mem = {
 	.name       = "events",
-	.is_visible = pebs_is_visible,
+	.is_visible = mem_is_visible,
 };
 
 static struct attribute_group group_events_tsx = {
@@ -6486,6 +6495,10 @@ __init int intel_pmu_init(void)
 
 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
 	case INTEL_FAM6_EMERALDRAPIDS_X:
+		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+		fallthrough;
+	case INTEL_FAM6_GRANITERAPIDS_X:
+	case INTEL_FAM6_GRANITERAPIDS_D:
 		pmem = true;
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -6502,7 +6515,6 @@ __init int intel_pmu_init(void)
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = spr_get_event_constraints;
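Taken together, the first two hunks gate the mem-loads-aux event attribute on PMU_FL_MEM_LOADS_AUX, which is now set only for Sapphire Rapids and Emerald Rapids, so the attribute stays hidden in sysfs on Granite Rapids. The sketch below is a stand-alone user-space model of that is_visible dispatch, not kernel code: struct attribute, x86_pmu, and the flag value are simplified stand-ins.

/*
 * Model of the visibility logic above: mem_is_visible() hides the
 * mem-loads-aux event unless PMU_FL_MEM_LOADS_AUX is set, and defers to
 * pebs_is_visible() for everything else. Types and values are stand-ins.
 */
#include <stdio.h>

#define PMU_FL_MEM_LOADS_AUX 0x1	/* stand-in flag bit */

struct attribute { const char *name; unsigned int mode; };

static struct { unsigned long flags; int pebs; } x86_pmu = { 0, 1 };

static struct attribute event_attr_mem_ld_aux = { "mem-loads-aux", 0444 };
static struct attribute event_attr_mem_loads  = { "mem-loads",     0444 };

static unsigned int pebs_is_visible(struct attribute *attr)
{
	return x86_pmu.pebs ? attr->mode : 0;	/* visible only if PEBS exists */
}

static unsigned int mem_is_visible(struct attribute *attr)
{
	if (attr == &event_attr_mem_ld_aux)
		return (x86_pmu.flags & PMU_FL_MEM_LOADS_AUX) ? attr->mode : 0;
	return pebs_is_visible(attr);
}

int main(void)
{
	/* Granite Rapids path in this model: PMU_FL_MEM_LOADS_AUX is not set. */
	printf("mem-loads-aux mode: %o\n", mem_is_visible(&event_attr_mem_ld_aux)); /* 0: hidden */
	printf("mem-loads     mode: %o\n", mem_is_visible(&event_attr_mem_loads));  /* 444: visible */
	return 0;
}

Returning 0 from an is_visible callback hides the attribute, while returning attr->mode exposes it with that mode; that is the convention the kernel's sysfs attribute groups follow.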
arch/x86/events/intel/cstate.c

@@ -678,6 +678,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&icx_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&icx_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,	&icx_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,	&icx_cstates),
 
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
arch/x86/events/intel/uncore_snbep.c

@@ -6068,6 +6068,17 @@ static struct intel_uncore_ops spr_uncore_mmio_ops = {
 	.read_counter	= uncore_mmio_read_counter,
 };
 
+static struct uncore_event_desc spr_uncore_imc_events[] = {
+	INTEL_UNCORE_EVENT_DESC(clockticks,		"event=0x01,umask=0x00"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read,		"event=0x05,umask=0xcf"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale,	"6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit,	"MiB"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write,	"event=0x05,umask=0xf0"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale,	"6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit,	"MiB"),
+	{ /* end: all zeroes */ },
+};
+
 static struct intel_uncore_type spr_uncore_imc = {
 	SPR_UNCORE_COMMON_FORMAT(),
 	.name			= "imc",
@@ -6075,6 +6086,7 @@ static struct intel_uncore_type spr_uncore_imc = {
 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
 	.ops		= &spr_uncore_mmio_ops,
+	.event_descs	= spr_uncore_imc_events,
 };
 
 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
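The cas_count_read/cas_count_write scale string 6.103515625e-5 is 64 / 2^20: each CAS event corresponds to one 64-byte cache-line transfer, so multiplying the raw count by the scale yields MiB, matching the "MiB" unit strings above. The following stand-alone check of that arithmetic uses a made-up raw count purely for illustration.

#include <stdio.h>

int main(void)
{
	const double cache_line_bytes = 64.0;	/* one CAS transfers one cache line */
	const double mib = 1024.0 * 1024.0;
	const double scale = cache_line_bytes / mib;

	printf("scale = %.12g\n", scale);	/* prints 6.103515625e-05 */

	/* Illustrative only: convert a raw cas_count_read of 1,000,000 to MiB. */
	const double raw_count = 1000000.0;
	printf("%.0f CAS reads = %.3f MiB\n", raw_count, raw_count * scale);
	return 0;
}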
arch/x86/events/msr.c

@@ -70,6 +70,8 @@ static bool test_intel(int idx, void *data)
 	case INTEL_FAM6_BROADWELL_X:
 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
 	case INTEL_FAM6_EMERALDRAPIDS_X:
+	case INTEL_FAM6_GRANITERAPIDS_X:
+	case INTEL_FAM6_GRANITERAPIDS_D:
 
 	case INTEL_FAM6_ATOM_SILVERMONT:
 	case INTEL_FAM6_ATOM_SILVERMONT_D:
kernel/events/core.c

@@ -9433,8 +9433,8 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
 		hwc->interrupts = 1;
 	} else {
 		hwc->interrupts++;
-		if (unlikely(throttle
-			     && hwc->interrupts >= max_samples_per_tick)) {
+		if (unlikely(throttle &&
+			     hwc->interrupts > max_samples_per_tick)) {
 			__this_cpu_inc(perf_throttled_count);
 			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 			hwc->interrupts = MAX_INTERRUPTS;
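The functional change in this hunk is the comparison: hwc->interrupts is incremented before the test, so switching from >= to > lets an event take exactly max_samples_per_tick interrupts within a tick before it is marked throttled (hwc->interrupts = MAX_INTERRUPTS), one more than before. Below is a stand-alone model of that counting; max_samples_per_tick and MAX_INTERRUPTS are made-up values, not the kernel's runtime settings.

/*
 * Count interrupts within one tick and report when each comparison
 * would throttle, mirroring the increment-then-check order above.
 */
#include <stdio.h>

#define MAX_INTERRUPTS 256			/* stand-in value */
static const unsigned long max_samples_per_tick = 5;	/* stand-in value */

static int account_interrupt(unsigned long *interrupts, int use_strict_greater)
{
	int throttled;

	(*interrupts)++;
	throttled = use_strict_greater ? (*interrupts > max_samples_per_tick)
				       : (*interrupts >= max_samples_per_tick);
	if (throttled)
		*interrupts = MAX_INTERRUPTS;	/* mirrors hwc->interrupts = MAX_INTERRUPTS */
	return throttled;
}

int main(void)
{
	unsigned long old_cmp = 0, new_cmp = 0;
	int n;

	for (n = 1; !account_interrupt(&old_cmp, 0); n++)
		;
	printf(">= throttles on interrupt %d\n", n);	/* 5 */

	for (n = 1; !account_interrupt(&new_cmp, 1); n++)
		;
	printf(">  throttles on interrupt %d\n", n);	/* 6 */
	return 0;
}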