perf tools: Move x86 barrier.h stuff to tools/arch/x86/include/asm/barrier.h

We will need it for atomic.h, so move it from the ad-hoc tools/perf/
place to a tools/ subset of the kernel arch/ hierarchy.

Other arches will follow, each in a cset.
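For reference, a minimal sketch of how tools/ code picks up the relocated
barriers once this lands, assuming the compiler is pointed at tools/include
(the file name, build line and test function below are illustrative only,
not part of this commit):

  /* barrier-test.c: hypothetical compile check.
   * Build (illustrative): gcc -I tools/include -c barrier-test.c
   * <asm/barrier.h> resolves to tools/include/asm/barrier.h, which in
   * turn pulls in tools/arch/x86/include/asm/barrier.h on x86. */
  #include <asm/barrier.h>

  void ordering_example(void)
  {
  	wmb();	/* sfence on x86_64: order prior stores */
  	rmb();	/* lfence on x86_64: order subsequent loads */
  	mb();	/* mfence on x86_64: full barrier */
  }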

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-vy6bqmsvm6puibpay2cy4wid@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 361c564eef
parent f8bffbf122
Author: Arnaldo Carvalho de Melo
Date:   2015-04-30 12:33:22 -03:00

4 changed files, 34 insertions(+), 6 deletions(-)

--- /dev/null
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -0,0 +1,28 @@
+#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
+#define _TOOLS_LINUX_ASM_X86_BARRIER_H
+
+/*
+ * Copied from the Linux kernel sources, and also moving code
+ * out from tools/perf/perf-sys.h so as to make it be located
+ * in a place similar as in the kernel sources.
+ *
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#if defined(__i386__)
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#elif defined(__x86_64__)
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
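The producer/consumer split between wmb() and rmb() is what perf's mmap
ring-buffer code relies on: the kernel publishes events and updates
data_head; the tool reads data_head, consumes events, then publishes
data_tail. A hedged sketch of the consumer side, modeled on perf's
perf_mmap__read_head()/perf_mmap__write_tail() helpers (the function
names here are just for the sketch, not the exact perf code):

  #include <linux/perf_event.h>	/* struct perf_event_mmap_page */
  #include <asm/barrier.h>

  /* Load the producer position, then fence before touching ring data. */
  static inline __u64 read_data_head(struct perf_event_mmap_page *pc)
  {
  	__u64 head = pc->data_head;
  	rmb();	/* reads of ring data must not pass the head load */
  	return head;
  }

  /* Finish reading ring data before letting the kernel reuse it. */
  static inline void write_data_tail(struct perf_event_mmap_page *pc,
  				     __u64 tail)
  {
  	mb();	/* order data reads before the tail store */
  	pc->data_tail = tail;
  }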

--- /dev/null
+++ b/tools/include/asm/barrier.h
@@ -0,0 +1,3 @@
+#if defined(__i386__) || defined(__x86_64__)
+#include "../../arch/x86/include/asm/barrier.h"
+#endif

--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,4 +1,5 @@
 tools/perf
+tools/arch/x86/include/asm/barrier.h
 tools/scripts
 tools/build
 tools/lib/traceevent
@@ -6,6 +7,7 @@ tools/lib/api
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
+tools/include/asm/barrier.h
 tools/include/asm/bug.h
 tools/include/asm-generic/bitops/arch_hweight.h
 tools/include/asm-generic/bitops/atomic.h

--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -6,11 +6,9 @@
 #include <sys/syscall.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
+#include <asm/barrier.h>
 
 #if defined(__i386__)
-#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC {"model name"}
 #ifndef __NR_perf_event_open
@@ -25,9 +23,6 @@
 #endif
 
 #if defined(__x86_64__)
-#define mb() asm volatile("mfence" ::: "memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#define rmb() asm volatile("lfence" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC {"model name"}
 #ifndef __NR_perf_event_open