mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-05 18:14:07 +08:00
perf/x86/intel/uncore: Introduce customized event_read() for client IMC uncore
There are two free-running counters for client IMC uncore. The customized event_init() function hard codes their index to 'UNCORE_PMC_IDX_FIXED' and 'UNCORE_PMC_IDX_FIXED + 1'. To support the index 'UNCORE_PMC_IDX_FIXED + 1', the generic uncore_perf_event_update is obscurely hacked. The code quality issue will bring problems when a new counter index is introduced into the generic code, for example, a new index for free-running counter. Introducing a customized event_read() function for client IMC uncore. The customized function is copied from previous generic uncore_pmu_event_read(). The index 'UNCORE_PMC_IDX_FIXED + 1' will be isolated for client IMC uncore only. Signed-off-by: Kan Liang <kan.liang@intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: acme@kernel.org Cc: eranian@google.com Link: http://lkml.kernel.org/r/1525371913-10597-1-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
c52b5c5f96
commit
2da331465f
@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
|
||||
uncore_pmu_start_hrtimer(box);
|
||||
}
|
||||
|
||||
/*
 * Customized event read path for the client IMC uncore PMU.
 *
 * Copied from the generic uncore_pmu_event_read() so that the special
 * counter index 'UNCORE_PMC_IDX_FIXED + 1' stays isolated to the client
 * IMC code and does not leak into the generic uncore counter handling.
 */
static void snb_uncore_imc_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 prev_count, new_count, delta;
	int shift;

	/*
	 * There are two free running counters in IMC.
	 * The index for the second one is hardcoded to
	 * UNCORE_PMC_IDX_FIXED + 1.
	 */
	/*
	 * Pick the shift from the counter width so the delta computation
	 * below discards the bits above the hardware counter width.
	 */
	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	/*
	 * Lock-free update: if prev_count changed between the read and the
	 * exchange (e.g. the hrtimer updated it concurrently), retry with
	 * the fresh value instead of accumulating a stale delta.
	 */
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	/*
	 * Shift both values up to the counter's top bit before subtracting,
	 * then shift back down: this masks off bits beyond the counter
	 * width so a hardware wrap-around still yields the correct delta.
	 */
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
|
||||
|
||||
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
|
||||
{
|
||||
struct intel_uncore_box *box = uncore_event_to_box(event);
|
||||
@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
|
||||
* Drain the remaining delta count out of a event
|
||||
* that we are disabling:
|
||||
*/
|
||||
uncore_perf_event_update(box, event);
|
||||
snb_uncore_imc_event_read(event);
|
||||
hwc->state |= PERF_HES_UPTODATE;
|
||||
}
|
||||
}
|
||||
@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = {
|
||||
.del = snb_uncore_imc_event_del,
|
||||
.start = snb_uncore_imc_event_start,
|
||||
.stop = snb_uncore_imc_event_stop,
|
||||
.read = uncore_pmu_event_read,
|
||||
.read = snb_uncore_imc_event_read,
|
||||
};
|
||||
|
||||
static struct intel_uncore_ops snb_uncore_imc_ops = {
|
||||
|
Loading…
Reference in New Issue
Block a user