#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory");
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory");
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory");
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("":::"memory")
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		asm volatile("mov r0, #0xffff0fff; mov lr, pc;" \
				     "sub pc, r0, #95" ::: "r0", "lr", "cc", \
				     "memory")
#define cpu_relax()	asm volatile("":::"memory")
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE	31
#define PR_TASK_PERF_EVENTS_ENABLE	32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC		1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define __used		__attribute__((__unused__))

#define unlikely(x)	__builtin_expect(!!(x), 0)
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

#endif
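
/*
 * Illustrative usage sketch (not part of the original header): one way a
 * caller might use sys_perf_event_open() above to count hardware
 * instructions in the current thread. The function name and the minimal
 * error handling are assumptions for illustration only; a real caller
 * would also need <sys/ioctl.h> for the ioctl() calls.
 *
 *	static u64 count_own_instructions(void)
 *	{
 *		struct perf_event_attr attr = {
 *			.type		= PERF_TYPE_HARDWARE,
 *			.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *			.disabled	= 1,
 *		};
 *		u64 count = 0;
 *		int fd;
 *
 *		// pid 0 = current task, cpu -1 = any CPU, no group, no flags
 *		fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *		if (fd < 0)
 *			return 0;
 *
 *		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *		// ... run the code to be measured ...
 *		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 *		// With no read_format set, read() returns a single u64 count.
 *		read(fd, &count, sizeof(count));
 *		close(fd);
 *		return count;
 *	}
 */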