#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* non-zero: writes are dropped */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

/*
 * DEFINE_OUTPUT_COPY() generates a copy routine that walks the output
 * handle across data-page boundaries. The memcpy_func argument must
 * return the number of bytes it failed to copy (0 on success); the
 * generated function returns how many of the @len bytes were left
 * unwritten.
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
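/*
 * Illustrative sketch, not used by the kernel itself: all three
 * helpers generated above return the number of bytes they could NOT
 * write, so zero means the whole record made it into the buffer (the
 * convention mirrors __copy_from_user_inatomic()). The real consumers
 * are perf_output_copy() and perf_output_skip() in
 * kernel/events/ring_buffer.c; the hypothetical wrapper below only
 * demonstrates the calling convention.
 */
static inline bool example_output_record(struct perf_output_handle *handle,
					 const void *rec, unsigned long len)
{
	/* __output_copy() advances handle->addr/page/size across wraps */
	return __output_copy(handle, rec, len) == 0;
}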
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

/*
 * Pick a free recursion slot for the current context (0: task,
 * 1: softirq, 2: hardirq, 3: NMI). Returns -1 if the slot is already
 * taken, i.e. perf recursed into itself from the same context.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */
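/*
 * Usage sketch for the recursion helpers above (illustrative only; the
 * real caller is perf_swevent_get_recursion_context() in
 * kernel/events/core.c, which passes a per-CPU recursion array):
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		// this context is already inside perf; drop
 *	// ... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */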