#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

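/*
 * Ring buffer backing a perf event's mmap()ed data area (and, when
 * allocated, its AUX area); see kernel/events/ring_buffer.c.
 */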
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
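
/*
 * ring_buffer_get()/ring_buffer_put() take and drop rb->refcount; the
 * final put frees the buffer after an RCU grace period.
 */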
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.  In this
 * configuration the whole data area is a single virtually contiguous
 * allocation, and page_order() reports its size: the buffer then behaves
 * as one large page of PAGE_SIZE << page_order(rb) bytes.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
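
/*
 * Worked example (hypothetical numbers): with 4 KiB pages and an
 * order-0, eight-page buffer, perf_data_size() is 8 << 12 = 32 KiB.
 * Under CONFIG_PERF_USE_VMALLOC, rb_alloc() collapses the same request
 * into nr_pages == 1 with page_order == 3, so perf_data_size() is
 * 1 << (12 + 3) = 32 KiB as well.
 */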
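
/*
 * DEFINE_OUTPUT_COPY() instantiates a copy loop over the power-of-two
 * sized data area: @memcpy_func must return the number of bytes it
 * failed to copy, and the generated function returns how many of the
 * @len bytes remain unwritten, advancing @handle across page boundaries
 * as it goes.
 */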
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

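/*
 * Returning 0 ("nothing failed to copy") without touching @dst lets
 * __output_skip() advance the handle by @len bytes without writing any
 * data.
 */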
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

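/*
 * Default implementation: __copy_from_user_inatomic() returns the
 * number of bytes left uncopied, which is exactly the contract
 * DEFINE_OUTPUT_COPY() expects.  Page faults are disabled because
 * output may run in atomic (even NMI) context, where a sleeping fault
 * is not allowed.
 */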
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
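
/*
 * Usage sketch for the variants above (illustrative only; the
 * perf_output_begin()/perf_output_end() declarations live in
 * <linux/perf_event.h>):
 *
 *	struct perf_output_handle handle;
 *	u64 val = 42;
 *
 *	if (!perf_output_begin(&handle, event, sizeof(val))) {
 *		__output_copy(&handle, &val, sizeof(val));
 *		perf_output_end(&handle);
 *	}
 *
 * A non-zero return from the copy means that many trailing bytes could
 * not be written (for __output_copy_user, a faulting user access).
 */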

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

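/*
 * Recursion protection: @recursion points to an array with one counter
 * per execution context (task, softirq, hardirq, NMI).  Returns the
 * context index to pass to put_recursion_context(), or -1 if an event
 * is already being handled in this context on this CPU.
 */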
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
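
/*
 * Usage sketch (illustrative; mirrors the callchain_recursion usage in
 * kernel/events/callchain.c -- the per-CPU array below is hypothetical):
 *
 *	static DEFINE_PER_CPU(int, example_recursion[4]);
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(example_recursion));
 *	if (rctx < 0)
 *		return;		// already active in this context; bail out
 *	// ... emit the event ...
 *	put_recursion_context(this_cpu_ptr(example_recursion), rctx);
 */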

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */