/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* writes are dropped while set */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;	/* AUX wakeup watermark */
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;	/* nr of mmaps       */
	unsigned long			mmap_locked;	/* locked pages charged for this mapping */
	struct user_struct		*mmap_user;	/* user charged for locked memory */

	/* AUX area */
	long				aux_head;	/* AUX write position */
	unsigned int			aux_nest;	/* nested AUX writers */
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;	/* page offset of the AUX mapping */
	int				aux_nr_pages;	/* nr of AUX pages   */
	int				aux_overwrite;	/* AUX overwrite mode */
	atomic_t			aux_mmap_count;	/* nr of AUX mmaps   */
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);	/* frees aux_priv */
	refcount_t			aux_refcount;
	int				aux_in_sampling;	/* AUX sampling recursion guard */
	void				**aux_pages;	/* AUX page pointers */
	void				*aux_priv;	/* PMU-private AUX state */

	struct perf_event_mmap_page	*user_page;	/* control page, mmap offset 0 */
	void				*data_pages[];	/* data page pointers */
};
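
/*
 * Layout note: user_page is the perf_event_mmap_page control page that
 * userspace maps at file offset 0; the data pages follow it in the same
 * mapping, while the AUX area (if any) is mapped separately at aux_pgoff.
 */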

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}
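
/*
 * rb_free_rcu() is meant as a call_rcu() callback, so the buffer is only
 * freed once concurrent RCU readers (e.g. via ring_buffer_get()) are done
 * with it. A sketch of the drop path, modelled on ring_buffer_put():
 *
 *	if (refcount_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 */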

static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
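
/*
 * While rb->paused is set, perf_output_begin() rejects new records and
 * accounts them in rb->lost. Unpausing only makes sense once the buffer
 * actually has data pages, hence the rb->nr_pages check above.
 */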

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif
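
/*
 * With CONFIG_PERF_USE_VMALLOC the buffer is a single contiguous vmalloc
 * area (data_pages[0] points into it, page_order gives its size) and
 * perf_mmap_to_page() translates offsets via vmalloc_to_page(); otherwise
 * every data page is a separate order-0 allocation.
 */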

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
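
/*
 * Example: a mapping of 8 data pages with 4K pages and page_order() == 0
 * yields perf_data_size() == 8 << 12 == 32KiB. In the vmalloc case
 * nr_pages is at most 1 and page_order() accounts for the whole area.
 */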

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
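
/*
 * The body above copies in page-sized chunks: memcpy_func() returns how
 * many bytes it failed to copy, so `written' ends up as the bytes that
 * actually made it. Page wrap-around uses a mask, which relies on
 * nr_pages being a power of two (perf_mmap() enforces this). The loop
 * stops early on a partial copy (written != size) and returns the number
 * of bytes left uncopied.
 */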

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
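
/*
 * DEFINE_OUTPUT_COPY(name, fn) expands to a helper of the form
 *
 *	static inline unsigned long
 *	name(struct perf_output_handle *handle,
 *	     const void *buf, unsigned long len);
 *
 * which returns how many bytes of @len could not be written.
 */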

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}
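
/*
 * All memcpy_func flavours follow the copy_from_user() convention of
 * returning the number of bytes NOT copied; plain memcpy() cannot fail,
 * so memcpy_common() always reports 0.
 */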

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif
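
/*
 * The pagefault_disable()/pagefault_enable() pair makes the user copy
 * safe in the atomic contexts perf writes from (up to and including
 * NMI): instead of sleeping on a fault, __copy_from_user_inatomic()
 * bails out and the uncopied byte count propagates back through the
 * copy loop above.
 */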

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

static inline int get_recursion_context(int *recursion)
{
	unsigned int pc = preempt_count();
	unsigned char rctx = 0;

	rctx += !!(pc & (NMI_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
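
/*
 * get_recursion_context()/put_recursion_context() bracket event output
 * to stop a context level from recursing into itself; rctx maps the
 * preempt_count() bits to a level: 0 == task, 1 == softirq,
 * 2 == hardirq, 3 == NMI. A sketch of the usual pattern, mirroring
 * perf_swevent_get_recursion_context():
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		// already active at this level, drop
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */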

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */