#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* writing disabled when set */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];	/* flexible array, one pointer per data page */
};
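
/*
 * data_pages[] is a flexible array member, so the struct is allocated with
 * room for nr_pages page pointers. A minimal allocation sketch, assuming a
 * kzalloc()-based allocator (illustrative, not the verbatim rb_alloc()
 * internals):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = kzalloc(offsetof(struct ring_buffer, data_pages[nr_pages]),
 *		     GFP_KERNEL);
 */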

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
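
/*
 * rb_free_rcu() is an RCU callback: the last reference drop defers the
 * actual free past a grace period. A hedged usage sketch of how the final
 * put is expected to look (mirroring ring_buffer_put() in core.c):
 *
 *	if (atomic_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 */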

/*
 * Pause/resume writer access; a buffer with no data pages can never be
 * unpaused, hence the nr_pages check.
 */
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);
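
/*
 * A hedged lifecycle sketch for the allocation API above (illustrative;
 * the real call sites live in kernel/events/core.c):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_put(rb);	// drops the reference taken by rb_alloc()
 */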

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
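
/*
 * Worked example, assuming PAGE_SHIFT == 12 (4 KiB pages): with
 * nr_pages == 8 and page_order() == 0, perf_data_size() yields
 * 8 << 12 == 32 KiB. With the vmalloc backing the data area is one
 * high-order allocation, so nr_pages is at most 1 and page_order()
 * carries the size: 1 << (12 + 3) == 32 KiB as well.
 */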

/*
 * Copy/skip loop shared by the __output_*() helpers below: advances the
 * handle across page boundaries (nr_pages is a power of two, hence the
 * mask) and returns the number of bytes that could NOT be copied.
 */
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
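
/*
 * For reference, DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) expands
 * to (sketch, whitespace aside):
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{
 *		... copy loop calling memcpy_common(handle->addr, buf, size) ...
 *	}
 */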

/*
 * __output_custom() does not advance buf; instead the running offset
 * (orig_len - len) is passed along, so a perf_copy_f callback receives
 * (dst, src, offset, len) and can index into its source itself.
 */
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

/*
 * Like the other memcpy_func flavours, memcpy_common() returns the number
 * of bytes NOT copied; plain memcpy() cannot fail, hence 0.
 */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/*
 * memcpy_skip() copies nothing: __output_skip() just advances the handle
 * by len bytes, leaving the buffer contents untouched.
 */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

/*
 * Generic fallback: copy from user space with page faults disabled (the
 * output path can run in NMI context); returns the number of bytes not
 * copied, matching the __DEFINE_OUTPUT_COPY_BODY() convention.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
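
/*
 * A hedged usage sketch for __output_copy_user(): copying a span of the
 * user stack into the ring buffer from a sample writer (names illustrative,
 * not verbatim kernel code):
 *
 *	unsigned long rem;
 *
 *	rem = __output_copy_user(&handle, (const void *)user_sp, dump_size);
 *	// rem == bytes that faulted and could not be copied
 */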

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

/*
 * One recursion counter per execution context (task, softirq, hardirq,
 * NMI); returns the context index, or -1 if this context is already active.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
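
/*
 * A hedged usage sketch of the recursion guard pair (this mirrors the
 * swevent recursion handling in kernel/events/core.c; the recursion array
 * here is illustrative):
 *
 *	int rctx = get_recursion_context(recursion);
 *
 *	if (rctx < 0)
 *		return;		// already active in this context, bail out
 *	... do the non-reentrant work ...
 *	put_recursion_context(recursion, rctx);
 */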

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */