#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll bookkeeping */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
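/*
 * Illustrative note (assuming the rb_alloc() implementation in
 * ring_buffer.c): in the vmalloc case the whole buffer is a single
 * contiguous vmalloc_user() region; data_pages[0] points at it,
 * nr_pages is clamped to 1, and page_order holds ilog2() of the
 * requested (power-of-two) page count.  perf_data_size() below then
 * agrees in both configurations, e.g. for n pages:
 *
 *	vmalloc:	1 << (PAGE_SHIFT + ilog2(n)) == n * PAGE_SIZE
 *	page-wise:	n << (PAGE_SHIFT + 0)        == n * PAGE_SIZE
 *
 * The page-wrap mask in DEFINE_OUTPUT_COPY() keeps working too, since
 * nr_pages - 1 == 0 pins handle->page to the single entry.
 */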

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

/*
 * Generate a copy-out helper: move 'len' bytes from 'buf' into the
 * buffer behind 'handle', advancing page by page.  'memcpy_func' must
 * return the number of bytes it failed to copy; the generated function
 * returns the number of bytes left uncopied.
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

/* Plain kernel-to-kernel copy: cannot fail, so report 0 bytes missed. */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* Pretend everything was copied: advances the handle without writing. */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

/*
 * One recursion slot per context: task, softirq, hardirq, NMI.
 * Returns the slot claimed for the current context, or -1 if that
 * level is already active.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */
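/*
 * Usage sketch (illustrative only, not part of the upstream header):
 * a typical software-event writer claims the recursion slot for the
 * current context, emits its payload through one of the
 * DEFINE_OUTPUT_COPY() helpers, then releases the slot.  'recursion'
 * here stands for a caller-owned per-cpu int[4]; 'handle' is assumed
 * to have been set up by perf_output_begin().
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			-- already writing at this level
 *	__output_copy(&handle, buf, len);
 *	put_recursion_context(recursion, rctx);
 */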