#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned int						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned int len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min_t(unsigned long, handle->size, len);		\
									\
		written = memcpy_func(handle->addr, buf, size);		\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline int memcpy_common(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return n;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

#define MEMCPY_SKIP(dst, src, n) (n)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */
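
/*
 * Usage sketch for get_recursion_context()/put_recursion_context():
 * callers bracket a per-CPU recursion[] array with one slot per context
 * level (task = 0, softirq = 1, hardirq = 2, NMI = 3) so that a sample
 * taken from within a sample in the same context is dropped instead of
 * recursing.  This is only an illustration of the pairing, not code from
 * this header; the struct, variable and function names below are
 * hypothetical, and the caller is assumed to run with preemption disabled.
 *
 *	struct sample_cpu_state {
 *		int	recursion[4];
 *	};
 *	static DEFINE_PER_CPU(struct sample_cpu_state, sample_cpu_state);
 *
 *	static void emit_sample(void)
 *	{
 *		struct sample_cpu_state *s = this_cpu_ptr(&sample_cpu_state);
 *		int rctx = get_recursion_context(s->recursion);
 *
 *		if (rctx < 0)
 *			return;		(already sampling in this context)
 *
 *		(... build and write the sample ...)
 *
 *		put_recursion_context(s->recursion, rctx);
 *	}
 */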