/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load
	 * of ->data_tail from the stores of $data: if ->data_tail indicates
	 * there is no room in the buffer to store $data, we do not store it.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
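
/*
 * For reference, a minimal sketch of the matching userspace consumer
 * loop (not part of this file). Field names follow the mmap'ed
 * struct perf_event_mmap_page from the perf ABI; READ_ONCE(),
 * smp_rmb(), smp_mb(), process_record(), data, mask and rec_size are
 * placeholders the tool is assumed to provide:
 *
 *	u64 tail = up->data_tail;
 *	u64 head = READ_ONCE(up->data_head);
 *
 *	smp_rmb();			(C, matches B above)
 *	while (tail != head) {
 *		rec_size = process_record(data + (tail & mask));
 *		tail += rec_size;
 *	}
 *	smp_mb();			(D, matches A above)
 *	up->data_tail = tail;
 */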

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the
		 * @tail load is required to compute the branch to fail.
		 *
		 * This is A, and it matches D: the full memory barrier that
		 * userspace SHOULD issue after reading the data and before
		 * storing the new tail position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
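
/*
 * Worked example of the offset math above, with made-up numbers:
 * assuming 4KiB pages (PAGE_SHIFT == 12), page_order(rb) == 0 and
 * rb->nr_pages == 8, a reserved offset of 0x5123 gives
 * handle->page = (0x5123 >> 12) & 7 = 5 and an in-page offset of
 * 0x123, so handle->addr points 0x123 bytes into data page 5 and
 * handle->size = 0x1000 - 0x123 = 0xedd bytes remain on that page.
 */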

unsigned int perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
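
/*
 * A minimal sketch of how a kernel-side writer uses the calls above,
 * given a struct perf_event *event; the record struct and its 'value'
 * field are made up for illustration (real callers emit one of the
 * PERF_RECORD_* types defined by the ABI):
 *
 *	struct perf_output_handle handle;
 *	struct {
 *		struct perf_event_header header;
 *		u64			 value;
 *	} rec;
 *
 *	rec.header.type = PERF_RECORD_SAMPLE;
 *	rec.header.misc = 0;
 *	rec.header.size = sizeof(rec);
 *	rec.value	= 42;
 *
 *	if (!perf_output_begin(&handle, event, rec.header.size)) {
 *		perf_output_put(&handle, rec);
 *		perf_output_end(&handle);
 *	}
 */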

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}
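
/*
 * Example with made-up numbers: for 8 data pages of 4KiB,
 * perf_data_size() is 32768 bytes, so a watermark of 0 defaults to
 * 16384 and a wakeup is generated roughly every half buffer.
 */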

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
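
/*
 * A sketch of a typical call, with made-up numbers: allocate a
 * writable buffer of 8 data pages with the default watermark and no
 * CPU/node preference:
 *
 *	struct ring_buffer *rb = rb_alloc(8, 0, -1, RING_BUFFER_WRITABLE);
 *
 *	if (!rb)
 *		return -ENOMEM;
 */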

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' rather than '>=' accounts for the extra user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' rather than '<' accounts for the extra user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
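
/*
 * Worked example, with made-up numbers: rb_alloc(8, 0, -1, 0) on
 * this path vmallocs 9 pages (the user page plus 8 data pages) as
 * one contiguous area, then sets page_order = ilog2(8) = 3 and
 * nr_pages = 1, so the rest of the code treats the data area as a
 * single "page" of 2^3 real pages and perf_data_size() still covers
 * 8 * PAGE_SIZE bytes.
 */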

#endif