/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

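/*
 * A perf ring buffer is one control page (struct perf_event_mmap_page,
 * shared with user space) followed by a power-of-two number of data
 * pages; an optional AUX area for hardware-written streams (e.g. from
 * Intel PT) hangs off the same structure.
 */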
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* if set, drop writes */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;	/* write position    */
	local_t				aux_nest;	/* nested writers    */
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;	/* mmap offset, in pages */
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];	/* data pages follow the user page */
};

extern void rb_free(struct ring_buffer *rb);

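/*
 * RCU callback used to free the buffer once a grace period has elapsed,
 * so that lockless readers holding a reference obtained under
 * rcu_read_lock() are guaranteed to have finished.
 */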
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

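/*
 * Pause/resume writing into the ring buffer. A buffer without data
 * pages (e.g. one that only carries an AUX area) always stays paused.
 */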
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

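/*
 * Map an mmap() page offset back to its struct page: page 0 is the user
 * control page, pages 1..nr_pages hold the data, and offsets from
 * aux_pgoff onwards back the AUX area.
 */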
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

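/*
 * The copy loop below moves @len bytes into the ring buffer behind
 * @handle, crossing page boundaries as needed. @memcpy_func follows the
 * copy_from_user() convention and returns the number of bytes it did
 * NOT copy; the loop stops early on a partial copy and returns the
 * number of bytes left over.
 */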
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
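
/*
 * For instance, DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) below
 * generates:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len);
 *
 * which copies @len bytes from @buf into the buffer and returns the
 * number of bytes it could not copy.
 */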

/*
 * Like __output_copy(), but passes @copy_func the offset already
 * written (orig_len - len) so that custom copy routines can resume
 * from the right position in their source.
 */
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

/* memcpy() always succeeds, so report zero bytes left uncopied */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* copy nothing, just let the copy loop advance the output handle */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
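/*
 * Architectures may provide their own user-space copy routine; the
 * generic fallback below copies with page faults disabled and, like
 * __copy_from_user_inatomic(), returns the number of bytes not copied.
 */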
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
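/*
 * Track per-context recursion to keep perf from nesting within itself:
 * one counter each for task (0), softirq (1), hardirq (2) and NMI (3)
 * context. Returns the context index on success, or -1 if that context
 * is already active.
 *
 * Typical usage (illustrative, with a per-CPU int recursion[4]):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;
 *	...
 *	put_recursion_context(recursion, rctx);
 */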
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
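/*
 * Pairs with get_recursion_context(); the barrier makes sure the
 * protected work is finished before the context is released.
 */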
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */