// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>

#include "../perf_event.h"
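
/*
 * NMI-latency mitigation state (see the comment above amd_pmu_handle_irq()
 * below): after perf handles a counter overflow, perf_nmi_tstamp is set to
 * jiffies + perf_nmi_window, and any otherwise-unhandled perf NMI that
 * arrives before that time is claimed as a latent PMC NMI.
 */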
static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Family 17h and above use different hardware cache event encodings:
 * the pre-17h data cache miss, data prefetch, last-level cache, dTLB and
 * iTLB events either changed meaning or no longer exist, and the L3
 * (last level cache) counters are handled by the AMD uncore driver
 * instead.  Hence the separate table below.
 */
static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
			[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
			[C(RESULT_MISS)]   = 0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
			[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
			[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
			[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
			[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)]   = -1,
		},
	},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};

static u64 amd_pmu_event_map(int hw_event)
{
	if (boot_cpu_data.x86 >= 0x17)
		return amd_f17h_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}
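
/*
 * Example (follows directly from the tables above): a generic "cycles"
 * event (PERF_COUNT_HW_CPU_CYCLES) resolves to event select 0x0076 on all
 * families, while PERF_COUNT_HW_CACHE_REFERENCES resolves to 0x077d on
 * Family 16h and earlier but to 0xff60 on Family 17h and later.
 */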

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
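
/*
 * Worked example (assuming the MSR layout described above): for counter
 * index 2, the returned offset is 2 on legacy CPUs (registers spaced 1
 * apart from the 0xc0010000 base) and 4 on CPUs with the core performance
 * counter extension (event select/counter pairs spaced 2 apart from the
 * 0xc0010200 base).  The result is cached in event_offsets[] or
 * count_offsets[] so the feature check runs only once per index.
 */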

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}
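
/*
 * For instance (illustrative): an event opened with attr.exclude_guest set
 * (e.g. a "cycles:H" style host-only request) gets AMD64_EVENTSEL_HOSTONLY
 * set in its config, attr.exclude_host maps to AMD64_EVENTSEL_GUESTONLY,
 * and requesting both is emulated as counting nothing, as described above.
 */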

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
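
/*
 * Example (follows from the masking above): AMD event selects are 12 bits
 * wide, split across config bits [7:0] and [35:32].  A raw config of
 * 0x1000000c0 (bit 32 set, low byte 0xc0) therefore yields event code
 * 0x1c0, while a config carrying only the low byte 0xc0 yields 0x0c0.
 */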

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}
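
/*
 * Sketch of the config flow above: precise-sampling requests are handed
 * off to the IBS driver, branch-stack sampling is rejected, the generic
 * x86 checks run, and for raw events only the bits covered by
 * AMD64_RAW_EVENT_MASK are copied from the user-supplied config before
 * the host/guest-only handling in amd_core_hw_config() is applied.
 */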

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}
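
/*
 * Rough picture of the protocol above: nb->owners[] has one slot per
 * counter, shared by every core attached to the node.  A core claims a
 * slot for an NB event with cmpxchg(NULL -> event), releases a previously
 * claimed slot if it ends up choosing a different one, and
 * __amd_put_nb_event_constraints() later releases the slot with
 * cmpxchg(event -> NULL).  The returned constraint allows only the claimed
 * counter, so no two cores can program the same shared NB counter.
 */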

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return -ENOMEM;

	return 0;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (!x86_pmu.amd_nb_constraints)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}
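
/*
 * Lifecycle note (summarizing the three hotplug callbacks above): each CPU
 * pre-allocates an amd_nb in the prepare step; when it comes online it
 * either adopts an existing amd_nb with a matching northbridge id (queueing
 * its own copy for freeing) or keeps its own and bumps the refcount; the
 * dead callback drops the reference and frees the structure with the last
 * user.
 */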

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;
	u64 counter;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * forever...
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		rdmsrl(x86_pmu_event_addr(idx), counter);
		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}
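
/*
 * Example of the overflow test above: with 48-bit counters
 * (x86_pmu.cntval_bits == 48), bit 47 is examined; a counter whose top bit
 * is still 0 is considered overflowed and not yet reset by the NMI handler,
 * so the loop keeps polling, for up to OVERFLOW_WAIT_COUNT microseconds.
 */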

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	x86_pmu_disable_all();

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int active, handled;

	/*
	 * Obtain the active count before calling x86_pmu_handle_irq() since
	 * it is possible that x86_pmu_handle_irq() may make a counter
	 * inactive (through x86_pmu_stop).
	 */
	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp,
			       jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}
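
/*
 * Flow of the window logic above, as a sketch: whenever this handler
 * services at least one overflow, the claim window is pushed out to
 * jiffies + perf_nmi_window.  An NMI that reaches this handler with no
 * overflowed counter is then reported as NMI_HANDLED if it arrives before
 * the window expires (it is assumed to be a latent PMC NMI) and as
 * NMI_DONE once the window has passed, letting the unknown-NMI path run.
 */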

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
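
/*
 * These format strings are exported under the PMU's sysfs "format"
 * directory and define how raw event fields map onto
 * perf_event_attr.config.  For example (illustrative), something like
 * "perf stat -e cpu/event=0x76,umask=0x00/" encodes the 12-bit event
 * select into config bits 0-7 and 32-35 and the unit mask into bits 8-15,
 * matching amd_get_event_code() above.
 */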

/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
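
/*
 * Note: the "low ... high" definitions above expand to GCC/Clang
 * case-range labels; they are only meant to be used as the
 * "case AMD_EVENT_*:" labels in amd_get_event_constraints_f15h() below,
 * not in ordinary expressions.
 */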

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
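
/*
 * The constraint masks above are counter bitmaps: e.g. amd_f15_PMC20
 * (0x07) allows PERF_CTL[2:0], amd_f15_PMC53 (0x38) allows PERF_CTL[5:3],
 * and amd_f15_PMC30 (0x09) allows PERF_CTL[3] and PERF_CTL[0], matching
 * the table in the comment above.
 */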

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
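
/*
 * Worked example (derived from the switch above): event code 0x02E (an LS
 * event) returns amd_f15_PMC30, i.e. it may only be scheduled on
 * PERF_CTL[3] or PERF_CTL[0], while an unlisted LS/DC/EX event falls back
 * to amd_f15_PMC50 and may use any of PERF_CTL[5:0].
 */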

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
|
x86/perf/amd: Resolve NMI latency issues for active PMCs
On AMD processors, the detection of an overflowed PMC counter in the NMI
handler relies on the current value of the PMC. So, for example, to check
for overflow on a 48-bit counter, bit 47 is checked to see if it is 1 (not
overflowed) or 0 (overflowed).
When the perf NMI handler executes it does not know in advance which PMC
counters have overflowed. As such, the NMI handler will process all active
PMC counters that have overflowed. NMI latency in newer AMD processors can
result in multiple overflowed PMC counters being processed in one NMI and
then a subsequent NMI, that does not appear to be a back-to-back NMI, not
finding any PMC counters that have overflowed. This may appear to be an
unhandled NMI resulting in either a panic or a series of messages,
depending on how the kernel was configured.
To mitigate this issue, add an AMD handle_irq callback function,
amd_pmu_handle_irq(), that will invoke the common x86_pmu_handle_irq()
function and upon return perform some additional processing that will
indicate if the NMI has been handled or would have been handled had an
earlier NMI not handled the overflowed PMC. Using a per-CPU variable, a
minimum of the number of active PMCs and 2 will be set whenever a
PMC is active. This is used to indicate the possible number of NMIs that
can still occur. The value of 2 is used for when an NMI does not arrive
at the LAPIC in time to be collapsed into an already pending NMI. Each
time the function is called without having handled an overflowed counter,
the per-CPU value is checked. If the value is non-zero, it is decremented
and the handler reports that it handled the NMI. If the value is zero, then
the handler reports that it did not handle the NMI.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org> # 4.14.x-
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: https://lkml.kernel.org/r/Message-ID:
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2019-04-02 23:21:16 +08:00
|
|
|
.handle_irq = amd_pmu_handle_irq,
|
2019-04-02 23:21:14 +08:00
|
|
|
.disable_all = amd_pmu_disable_all,
|
2011-02-03 00:36:12 +08:00
|
|
|
.enable_all = x86_pmu_enable_all,
|
|
|
|
.enable = x86_pmu_enable_event,
|
2019-04-02 23:21:18 +08:00
|
|
|
.disable = amd_pmu_disable_event,
|
2011-02-03 00:36:12 +08:00
|
|
|
.hw_config = amd_pmu_hw_config,
|
|
|
|
.schedule_events = x86_schedule_events,
|
2012-06-21 02:46:35 +08:00
|
|
|
.eventsel = MSR_K7_EVNTSEL0,
|
|
|
|
.perfctr = MSR_K7_PERFCTR0,
|
2013-02-07 01:26:27 +08:00
|
|
|
.addr_offset = amd_pmu_addr_offset,
|
2011-02-03 00:36:12 +08:00
|
|
|
.event_map = amd_pmu_event_map,
|
|
|
|
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
|
2012-06-21 02:46:35 +08:00
|
|
|
.num_counters = AMD64_NUM_COUNTERS,
|
2011-02-03 00:36:12 +08:00
|
|
|
.cntval_bits = 48,
|
|
|
|
.cntval_mask = (1ULL << 48) - 1,
|
|
|
|
.apic = 1,
|
|
|
|
/* use highest bit to detect overflow */
|
|
|
|
.max_period = (1ULL << 47) - 1,
|
2012-06-21 02:46:35 +08:00
|
|
|
.get_event_constraints = amd_get_event_constraints,
|
2011-02-03 00:36:12 +08:00
|
|
|
.put_event_constraints = amd_put_event_constraints,
|
|
|
|
|
2012-06-21 02:46:35 +08:00
|
|
|
.format_attrs = amd_format_attr,
|
2012-10-10 20:53:14 +08:00
|
|
|
.events_sysfs_show = amd_event_sysfs_show,
|
2012-06-21 02:46:35 +08:00
|
|
|
|
2011-02-03 00:36:12 +08:00
|
|
|
.cpu_prepare = amd_pmu_cpu_prepare,
|
2012-02-29 21:57:32 +08:00
|
|
|
.cpu_starting = amd_pmu_cpu_starting,
|
2012-06-21 02:46:35 +08:00
|
|
|
.cpu_dead = amd_pmu_cpu_dead,
|
2016-03-25 22:52:35 +08:00
|
|
|
|
|
|
|
.amd_nb_constraints = 1,
|
2011-02-03 00:36:12 +08:00
|
|
|
};
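/*
 * Illustrative sketch, not part of the driver: with .cntval_bits = 48 and
 * .max_period capped below 2^47, the NMI handler can treat a clear bit 47 as
 * "this counter wrapped", which is exactly the overflow test the changelogs
 * above describe. The helper name below is hypothetical.
 */
static inline bool example_amd_counter_overflowed(u64 counter)
{
	return !(counter & (1ULL << 47));
}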
|
|
|
|
|
2013-05-21 19:05:37 +08:00
|
|
|
static int __init amd_core_pmu_init(void)
|
2012-06-21 02:46:35 +08:00
|
|
|
{
|
2015-12-07 17:39:41 +08:00
|
|
|
if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
|
2013-05-21 19:05:37 +08:00
|
|
|
return 0;
|
|
|
|
|
2019-08-02 02:57:41 +08:00
|
|
|
/* Avoid calculating the value each time in the NMI handler */
|
|
|
|
perf_nmi_window = msecs_to_jiffies(100);
|
|
|
|
|
2013-05-21 19:05:37 +08:00
|
|
|
switch (boot_cpu_data.x86) {
|
|
|
|
case 0x15:
|
|
|
|
pr_cont("Fam15h ");
|
2012-06-21 02:46:35 +08:00
|
|
|
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
|
2013-05-21 19:05:37 +08:00
|
|
|
break;
|
2016-11-18 00:15:06 +08:00
|
|
|
case 0x17:
|
|
|
|
pr_cont("Fam17h ");
|
|
|
|
/*
|
|
|
|
* In family 17h, there are no event constraints in the PMC hardware.
|
|
|
|
* We fall back to the default amd_get_event_constraints.
|
|
|
|
*/
|
|
|
|
break;
|
2018-09-23 17:34:47 +08:00
|
|
|
case 0x18:
|
|
|
|
pr_cont("Fam18h ");
|
|
|
|
/* Using default amd_get_event_constraints. */
|
|
|
|
break;
|
2013-05-21 19:05:37 +08:00
|
|
|
default:
|
|
|
|
pr_err("core perfctr but no constraints; unknown hardware!\n");
|
2012-06-21 02:46:35 +08:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If core performance counter extensions exist, we must use
|
|
|
|
* MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR MSRs. See also
|
2013-05-21 19:05:37 +08:00
|
|
|
* amd_pmu_addr_offset().
|
2012-06-21 02:46:35 +08:00
|
|
|
*/
|
|
|
|
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
|
|
|
|
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
|
|
|
|
x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
|
2016-03-25 22:52:35 +08:00
|
|
|
/*
|
|
|
|
* AMD Core perfctr has separate MSRs for the NB events; see
|
|
|
|
* the amd/uncore.c driver.
|
|
|
|
*/
|
|
|
|
x86_pmu.amd_nb_constraints = 0;
|
2012-06-21 02:46:35 +08:00
|
|
|
|
2013-05-21 19:05:37 +08:00
|
|
|
pr_cont("core perfctr, ");
|
2012-06-21 02:46:35 +08:00
|
|
|
return 0;
|
|
|
|
}
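/*
 * Illustrative sketch, not part of the driver: perf_nmi_window, computed once
 * in amd_core_pmu_init() above, bounds how long after handling an overflowed
 * counter the NMI handler keeps claiming otherwise "unhandled" NMIs. Assuming
 * the handler records jiffies + perf_nmi_window whenever it handles a counter,
 * the claim test reduces to a jiffies comparison; the helper name below is
 * hypothetical.
 */
static inline bool example_within_nmi_window(void)
{
	return time_before(jiffies, this_cpu_read(perf_nmi_tstamp));
}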
|
|
|
|
|
2011-08-31 07:41:05 +08:00
|
|
|
__init int amd_pmu_init(void)
|
2010-02-26 19:05:05 +08:00
|
|
|
{
|
2013-05-21 19:05:37 +08:00
|
|
|
int ret;
|
|
|
|
|
2010-02-26 19:05:05 +08:00
|
|
|
/* Performance-monitoring supported from K7 and later: */
|
|
|
|
if (boot_cpu_data.x86 < 6)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2012-06-21 02:46:35 +08:00
|
|
|
x86_pmu = amd_pmu;
|
|
|
|
|
2013-05-21 19:05:37 +08:00
|
|
|
ret = amd_core_pmu_init();
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2010-02-26 19:05:05 +08:00
|
|
|
|
2016-03-25 22:52:35 +08:00
|
|
|
if (num_possible_cpus() == 1) {
|
|
|
|
/*
|
|
|
|
* No point in allocating data structures to serialize
|
|
|
|
* against other CPUs when there is only one CPU.
|
|
|
|
*/
|
|
|
|
x86_pmu.amd_nb_constraints = 0;
|
|
|
|
}
|
|
|
|
|
perf/x86/amd: Update generic hardware cache events for Family 17h
Add a new amd_hw_cache_event_ids_f17h assignment structure set
for AMD families 17h and above, since a lot has changed. Specifically:
L1 Data Cache
The data cache access counter remains the same on Family 17h.
For DC misses, PMCx041's definition changes with Family 17h,
so instead we use the L2 cache accesses from L1 data cache
misses counter (PMCx060,umask=0xc8).
For DC hardware prefetch events, Family 17h breaks compatibility
for PMCx067 "Data Prefetcher", so instead, we use PMCx05a "Hardware
Prefetch DC Fills."
L1 Instruction Cache
PMCs 0x80 and 0x81 (32-byte IC fetches and misses) are backward
compatible on Family 17h.
For prefetches, we remove the erroneous PMCx04B assignment which
counts how many software data cache prefetch load instructions were
dispatched.
LL - Last Level Cache
Removing PMCs 7D, 7E, and 7F assignments, as they do not exist
on Family 17h, where the last level cache is L3. L3 counters
can be accessed using the existing AMD Uncore driver.
Data TLB
On Intel machines, data TLB accesses ("dTLB-loads") are assigned
to counters that count load/store instructions retired. This
is inconsistent with instruction TLB accesses, where Intel
implementations report iTLB misses that hit in the STLB.
Ideally, dTLB-loads would count higher level dTLB misses that hit
in lower level TLBs, and dTLB-load-misses would report those
that also missed in those lower-level TLBs, therefore causing
a page table walk. That would be consistent with instruction
TLB operation, remove the redundancy between dTLB-loads and
L1-dcache-loads, and prevent perf from producing artificially
low percentage ratios, i.e. the "0.01%" below:
42,550,869 L1-dcache-loads
41,591,860 dTLB-loads
4,802 dTLB-load-misses # 0.01% of all dTLB cache hits
7,283,682 L1-dcache-stores
7,912,392 dTLB-stores
310 dTLB-store-misses
On AMD Families prior to 17h, the "Data Cache Accesses" counter is
used, which is slightly better than load/store instructions retired,
but still counts in terms of individual load/store operations
instead of TLB operations.
So, for AMD Families 17h and higher, this patch assigns "dTLB-loads"
to a counter for L1 dTLB misses that hit in the L2 dTLB, and
"dTLB-load-misses" to a counter for L1 DTLB misses that caused
L2 DTLB misses and therefore also caused page table walks. This
results in a much more accurate view of data TLB performance:
60,961,781 L1-dcache-loads
4,601 dTLB-loads
963 dTLB-load-misses # 20.93% of all dTLB cache hits
Note that for all AMD families, data loads and stores are combined
in a single accesses counter, so no 'L1-dcache-stores' are reported
separately, and stores are counted with loads in 'L1-dcache-loads'.
Also note that the "% of all dTLB cache hits" string is misleading
because (a) "dTLB cache": although TLBs can be considered caches for
page tables, in this context, it can be misinterpreted as data cache
hits because the figures are similar (at least on Intel), and (b) not
all those loads (technically accesses) technically "hit" at that
hardware level. "% of all dTLB accesses" would be more clear/accurate.
Instruction TLB
On Intel machines, 'iTLB-loads' measure iTLB misses that hit in the
STLB, and 'iTLB-load-misses' measure iTLB misses that also missed in
the STLB and completed a page table walk.
For AMD Family 17h and above, for 'iTLB-loads' we replace the
erroneous instruction cache fetches counter with PMCx084
"L1 ITLB Miss, L2 ITLB Hit".
For 'iTLB-load-misses' we still use PMCx085 "L1 ITLB Miss,
L2 ITLB Miss", but set a 0xff umask because without it the event
does not get counted.
Branch Predictor (BPU)
PMCs 0xc2 and 0xc3 continue to be valid across all AMD Families.
Node Level Events
Family 17h does not have a PMCx0e9 counter, and corresponding counters
have not been made available publicly, so for now, we mark them as
unsupported for Families 17h and above.
Reference:
"Open-Source Register Reference For AMD Family 17h Processors Models 00h-2Fh"
Released 7/17/2018, Publication #56255, Revision 3.03:
https://www.amd.com/system/files/TechDocs/56255_OSRR.pdf
[ mingo: tidied up the line breaks. ]
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Cc: <stable@vger.kernel.org> # v4.9+
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Liška <mliska@suse.cz>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pu Wen <puwen@hygon.cn>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Cc: linux-perf-users@vger.kernel.org
Fixes: e40ed1542dd7 ("perf/x86: Add perf support for AMD family-17h processors")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2019-05-02 23:29:47 +08:00
|
|
|
if (boot_cpu_data.x86 >= 0x17)
|
|
|
|
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
|
|
|
|
else
|
|
|
|
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
|
2010-02-26 19:05:05 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
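/*
 * Illustrative sketch, not part of the driver: the Family 17h table copied
 * above differs from the legacy one in entries such as the ITLB read events,
 * which the changelog maps to PMCx084 ("L1 ITLB Miss, L2 ITLB Hit") and
 * PMCx085 with umask 0xff ("L1 ITLB Miss, L2 ITLB Miss"). One such pair,
 * written in the table's event | (umask << 8) encoding; the array name is
 * hypothetical.
 */
static __maybe_unused const u64 example_f17h_itlb_ids[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_RESULT_ACCESS] = 0x0084, /* L1 ITLB Miss, L2 ITLB Hit  */
	[PERF_COUNT_HW_CACHE_RESULT_MISS]   = 0xff85, /* L1 ITLB Miss, L2 ITLB Miss */
};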
|
2012-02-29 21:57:32 +08:00
|
|
|
|
|
|
|
void amd_pmu_enable_virt(void)
|
|
|
|
{
|
x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer registers
are used when code is generated.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processor's instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-18 01:30:40 +08:00
|
|
|
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
2012-02-29 21:57:32 +08:00
|
|
|
|
|
|
|
cpuc->perf_ctr_virt_mask = 0;
|
|
|
|
|
|
|
|
/* Reload all events */
|
2019-04-02 23:21:14 +08:00
|
|
|
amd_pmu_disable_all();
|
2012-02-29 21:57:32 +08:00
|
|
|
x86_pmu_enable_all(0);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
|
|
|
|
|
|
|
|
void amd_pmu_disable_virt(void)
|
|
|
|
{
|
2014-08-18 01:30:40 +08:00
|
|
|
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
2012-02-29 21:57:32 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We only mask out the Host-only bit so that host-only counting works
|
|
|
|
* when SVM is disabled. If someone sets up a guest-only counter when
|
|
|
|
* SVM is disabled, the Guest-only bit still gets set and the counter
|
|
|
|
* will not count anything.
|
|
|
|
*/
|
2013-02-07 01:26:26 +08:00
|
|
|
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
|
2012-02-29 21:57:32 +08:00
|
|
|
|
|
|
|
/* Reload all events */
|
2019-04-02 23:21:14 +08:00
|
|
|
amd_pmu_disable_all();
|
2012-02-29 21:57:32 +08:00
|
|
|
x86_pmu_enable_all(0);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
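/*
 * Illustrative sketch, not part of the driver: perf_ctr_virt_mask acts as a
 * set of event-select bits to strip before the value is written to the MSR.
 * With the mask set to AMD64_EVENTSEL_HOSTONLY (SVM disabled), the Host-only
 * bit is dropped so host-only counting keeps working, as the comment in
 * amd_pmu_disable_virt() describes. This assumes the generic enable path
 * applies the mask as "bits to clear"; the helper name is hypothetical.
 */
static inline u64 example_apply_perf_ctr_virt_mask(u64 config, u64 virt_mask)
{
	return config & ~virt_mask;
}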
|