/* tools/perf/perf-sys.h (xref: /openbmc/linux, revision 95acd4c7) */
#ifndef _PERF_SYS_H
#define _PERF_SYS_H

#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <asm/unistd.h>

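/*
 * Each architecture block below supplies what the perf tools need from
 * this header:
 *
 *   mb()/wmb()/rmb() - full, write and read memory barriers (used e.g.
 *                      when reading the mmap'ed perf event ring buffer),
 *   cpu_relax()      - a busy-wait hint, where the CPU provides one,
 *   CPUINFO_PROC     - the /proc/cpuinfo field name(s) describing the
 *                      CPU model,
 *   __NR_*           - hard-coded syscall numbers (x86 only), used as a
 *                      fallback when <asm/unistd.h> does not define them.
 */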
#if defined(__i386__)
#define mb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	{"model name"}
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#ifndef __NR_futex
# define __NR_futex 240
#endif
#ifndef __NR_gettid
# define __NR_gettid 224
#endif
#endif

#if defined(__x86_64__)
#define mb()		asm volatile("mfence" ::: "memory")
#define wmb()		asm volatile("sfence" ::: "memory")
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	{"model name"}
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#ifndef __NR_futex
# define __NR_futex 202
#endif
#ifndef __NR_gettid
# define __NR_gettid 186
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb()		asm volatile ("sync" ::: "memory")
#define wmb()		asm volatile ("sync" ::: "memory")
#define rmb()		asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC	{"cpu"}
#endif

#ifdef __s390__
#define mb()		asm volatile("bcr 15,0" ::: "memory")
#define wmb()		asm volatile("bcr 15,0" ::: "memory")
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define CPUINFO_PROC	{"vendor_id"}
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb()		asm volatile("synco" ::: "memory")
# define wmb()		asm volatile("synco" ::: "memory")
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define mb()		asm volatile("" ::: "memory")
# define wmb()		asm volatile("" ::: "memory")
# define rmb()		asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC	{"cpu type"}
#endif

#ifdef __hppa__
#define mb()		asm volatile("" ::: "memory")
#define wmb()		asm volatile("" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	{"cpu"}
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb()		asm volatile("ba,pt %%xcc, 1f\n"	\
				     "membar #StoreLoad\n"	\
				     "1:\n":::"memory")
#else
#define mb()		asm volatile("":::"memory")
#endif
#define wmb()		asm volatile("":::"memory")
#define rmb()		asm volatile("":::"memory")
#define CPUINFO_PROC	{"cpu"}
#endif

#ifdef __alpha__
#define mb()		asm volatile("mb" ::: "memory")
#define wmb()		asm volatile("wmb" ::: "memory")
#define rmb()		asm volatile("mb" ::: "memory")
#define CPUINFO_PROC	{"cpu model"}
#endif

#ifdef __ia64__
#define mb()		asm volatile ("mf" ::: "memory")
#define wmb()		asm volatile ("mf" ::: "memory")
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC	{"model name"}
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb()		((void(*)(void))0xffff0fa0)()
#define wmb()		((void(*)(void))0xffff0fa0)()
#define rmb()		((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC	{"model name", "Processor"}
#endif

#ifdef __aarch64__
#define mb()		asm volatile("dmb ish" ::: "memory")
#define wmb()		asm volatile("dmb ishst" ::: "memory")
#define rmb()		asm volatile("dmb ishld" ::: "memory")
#define cpu_relax()	asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define mb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define wmb()	mb()
#define rmb()	mb()
#define CPUINFO_PROC	{"cpu model"}
#endif

#ifdef __arc__
#define mb()		asm volatile("" ::: "memory")
#define wmb()		asm volatile("" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	{"Processor"}
#endif

#ifdef __metag__
#define mb()		asm volatile("" ::: "memory")
#define wmb()		asm volatile("" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	{"CPU"}
#endif

#ifdef __xtensa__
#define mb()		asm volatile("memw" ::: "memory")
#define wmb()		asm volatile("memw" ::: "memory")
#define rmb()		asm volatile("" ::: "memory")
#define CPUINFO_PROC	{"core ID"}
#endif

#ifdef __tile__
#define mb()		asm volatile ("mf" ::: "memory")
#define wmb()		asm volatile ("mf" ::: "memory")
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC	{"model name"}
#endif

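/*
 * barrier() is a compiler-only barrier. Architectures that did not
 * define cpu_relax() above fall back to it.
 */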
#define barrier() asm volatile ("" ::: "memory")

#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

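/*
 * Thin wrapper around the perf_event_open(2) syscall: returns the new
 * perf event file descriptor on success, or -1 with errno set on error.
 * When built with HAVE_ATTR_TEST, every open is also reported to
 * test_attr__open() so 'perf test' can check the attributes used;
 * test_attr__enabled and test_attr__open() are declared elsewhere in
 * the perf tools.
 */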
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

#ifdef HAVE_ATTR_TEST
	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
#endif
	return fd;
}
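
/*
 * Illustrative use of sys_perf_event_open(), not part of this header:
 * count instructions executed by the calling process. All names come
 * from <linux/perf_event.h>; error handling and <sys/ioctl.h> are
 * omitted for brevity.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *	unsigned long long count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... run the workload to be measured ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */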

#endif /* _PERF_SYS_H */