mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-29 23:53:55 +08:00
a23f96ee4d
Commit b9511cd761
("perf/x86: Fix time_shift in perf_event_mmap_page")
altered the time conversion algorithms documented in the perf_event.h
header file, to use 64-bit shifts. That was done to make the code more
future-proof (i.e. some time in the future a 32-bit shift could be
allowed). Reflect those changes in perf tools.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1457005856-6143-9-git-send-email-adrian.hunter@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
31 lines
620 B
C
#include <linux/compiler.h>

#include <linux/types.h>

#include "tsc.h"

/*
 * Convert a perf timestamp (nanoseconds) to a TSC cycle count using the
 * time_zero/time_mult/time_shift parameters in @tc.
 *
 * The value is split into quotient and remainder with respect to
 * time_mult before shifting, mirroring the algorithm documented in
 * perf_event.h (see commit b9511cd761, referenced above), so that the
 * left shifts operate on reduced intermediate values.
 */
u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
{
	u64 delta = ns - tc->time_zero;
	u64 q = delta / tc->time_mult;
	u64 r = delta - q * tc->time_mult;

	return (q << tc->time_shift) +
	       ((r << tc->time_shift) / tc->time_mult);
}
|
|
|
|
/*
 * Convert a TSC cycle count to a perf timestamp (nanoseconds) using the
 * time_zero/time_mult/time_shift parameters in @tc.
 *
 * The cycle count is split at time_shift bits: the high part is scaled
 * directly by time_mult, while the low part is scaled and then shifted
 * back down, matching the algorithm documented in perf_event.h.
 */
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
{
	u64 mask = ((u64)1 << tc->time_shift) - 1;
	u64 hi = cyc >> tc->time_shift;
	u64 lo = cyc & mask;

	return tc->time_zero + hi * tc->time_mult +
	       ((lo * tc->time_mult) >> tc->time_shift);
}
|
|
|
|
/*
 * Weak default for reading the timestamp counter: returns 0.
 * The __weak attribute lets an architecture-specific definition
 * elsewhere in the tree override this fallback at link time.
 */
u64 __weak rdtsc(void)
{
	return 0;
}
|