xref: /openbmc/linux/kernel/events/ring_buffer.c (revision c67e8ec0)
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load
	 * of ->data_tail and the stores of $data: if ->data_tail indicates
	 * there is no room in the buffer to store $data, we do not store it
	 * at all.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
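
/*
 * Illustrative sketch (not part of this file): the matching userspace
 * consumer loop for the A/B/C/D ordering diagram above, with the (C) and
 * (D) barriers spelled out using compiler builtins. Real consumers such
 * as tools/perf use their own barrier wrappers; this is only a hedged
 * example, guarded out because it is user code, not kernel code.
 */
#if 0
#include <stdint.h>
#include <linux/perf_event.h>

static void example_consume(struct perf_event_mmap_page *up,
			    char *data, uint64_t mask)
{
	uint64_t head = up->data_head;			/* LOAD ->data_head */
	uint64_t tail = up->data_tail;

	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* C, matches B */

	while (tail != head) {
		struct perf_event_header *hdr;

		hdr = (struct perf_event_header *)(data + (tail & mask));
		/* ... copy out hdr->size bytes of $data here ... */
		tail += hdr->size;
	}

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* D, matches A */
	up->data_tail = tail;				/* STORE ->data_tail */
}
#endif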

static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
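
/*
 * Worked example (illustrative): with data_size = 4096, head = 4000 and
 * tail = 100, CIRC_SPACE(head, tail, data_size) = (100 - 4001) & 4095 = 195,
 * so a forward writer may claim at most 195 bytes before it would overwrite
 * data userspace has not consumed yet. For a backward writer the arguments
 * swap roles.
 */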

static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the
		 * @tail load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
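
/*
 * Illustrative sketch (not part of this file): the usual producer-side
 * calling sequence around the three functions above, modelled on callers
 * such as perf_log_throttle() in kernel/events/core.c. The record type and
 * payload here are hypothetical, hence the PERF_RECORD_MAX placeholder and
 * the #if 0 guard.
 */
#if 0
static void example_emit_record(struct perf_event *event, u64 payload)
{
	struct perf_output_handle handle;
	struct {
		struct perf_event_header header;
		u64			 payload;
	} rec = {
		.header = {
			.type = PERF_RECORD_MAX,	/* hypothetical type */
			.misc = 0,
			.size = sizeof(rec),
		},
		.payload = payload,
	};

	/* Reserves space; takes the RCU read lock and handle on success. */
	if (perf_output_begin(&handle, event, rec.header.size))
		return;				/* -ENOSPC: record dropped */

	perf_output_put(&handle, rec);
	perf_output_end(&handle);	/* publishes head, drops RCU lock */
}
#endif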

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
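
/*
 * Worked example (illustrative): for a 16-page data area on a 4K-page
 * system, perf_data_size() is 65536, so with watermark == 0 the default
 * rb->watermark becomes 32768 and a wakeup is generated roughly every
 * half a buffer of output.
 */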

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab the ring_buffer's refcount instead of holding the rcu read
	 * lock to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because we are in atomic
	 * context (in_atomic()) here.
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for the AUX area; make sure nested
	 * writers are caught early.
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore the (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * The handle->size computation depends on the aux_tail load;
		 * this forms a control dependency barrier separating the
		 * aux_tail load from the aux data stores that will be
		 * enabled on a successful return.
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);

static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
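
/*
 * Worked example (illustrative): with aux_watermark = 4096, aux_wakeup =
 * 8192 and aux_head = 13000, 13000 - 8192 >= 4096 holds, so aux_wakeup is
 * rounded down to 12288 and the caller posts a wakeup; the next one is due
 * once aux_head passes 16384.
 */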

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe the ordering rules of the
 * hardware, so that all the data is externally visible before this is
 * called.
 *
 * Note: this has to be called from the pmu::stop() callback, as the
 * assumption of the AUX buffer management code is that after pmu::stop(),
 * the AUX transaction must be stopped and therefore drop the AUX
 * reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	if (size || handle->aux_flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to
		 * communicate.
		 *
		 * Note: the OVERWRITE records by themselves are not
		 * considered useful, as they don't communicate any *new*
		 * information, aside from the short-lived offset, which
		 * becomes history at the next event sched-in and therefore
		 * isn't useful. Userspace that needs to copy out AUX data
		 * in overwrite mode should know to use user_page::aux_head
		 * for the actual offset. So, from now on we don't output
		 * AUX records that have *only* the OVERWRITE flag set.
		 */
		if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
			perf_event_aux_event(handle->event, aux_head, size,
					     handle->aux_flags);
	}

	rb->user_page->aux_head = rb->aux_head;
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
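
/*
 * Illustrative sketch (not part of this file): how a PMU driver pairs
 * perf_aux_output_begin()/perf_aux_output_end() across its pmu::start()
 * and pmu::stop() callbacks. The handle would normally live in per-CPU
 * driver state; every example_* name here is hypothetical and the
 * hardware programming is elided, hence the #if 0 guard.
 */
#if 0
static DEFINE_PER_CPU(struct perf_output_handle, example_handle);

static void example_pmu_start(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&example_handle);
	void *buf;

	buf = perf_aux_output_begin(handle, event);
	if (!buf)
		return;	/* no AUX buffer, no space, or nested writer */

	/* point the hardware at the reserved [head, head + size) window */
	example_hw_enable(buf, handle->head, handle->size);
}

static void example_pmu_stop(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&example_handle);
	unsigned long written = example_hw_disable();	/* bytes written */

	/* commit the data, post PERF_RECORD_AUX, drop the AUX reference */
	perf_aux_output_end(handle, written);
}
#endif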

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	rb->user_page->aux_head = rb->aux_head;
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);
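
/*
 * Illustrative sketch (not part of this file): a driver padding its AUX
 * output up to a hypothetical 64-byte hardware record alignment before
 * starting a new chunk.
 */
#if 0
static int example_align_output(struct perf_output_handle *handle)
{
	unsigned long pad = ALIGN(handle->head, 64) - handle->head;

	/* perf_aux_output_skip() returns -ENOSPC if pad exceeds the space */
	return pad ? perf_aux_output_skip(handle, pad) : 0;
}
#endif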

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);

#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
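
/*
 * Illustrative sketch (not part of this file): how a PMU driver walking
 * rb->aux_pages can recover the high-order chunk size communicated via
 * PagePrivate()/page_private() above.
 */
#if 0
static unsigned long example_chunk_pages(void *aux_page_addr)
{
	struct page *page = virt_to_page(aux_page_addr);

	/* only the first page of a high-order chunk carries the order */
	return PagePrivate(page) ? 1UL << page_private(page) : 1;
}
#endif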

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * This should never happen: the last reference should be dropped
	 * from the perf_mmap_close() path, which first stops aux
	 * transactions (which in turn are the atomic holders of
	 * aux_refcount) and then does the last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * The PMU requests more than one contiguous chunk of memory
		 * for SW double buffering.
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
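
/*
 * Worked example (illustrative): the default watermark above,
 * nr_pages << (PAGE_SHIFT - 1), is half the AUX buffer size in bytes. With
 * 64 pages of 4K (262144 bytes), it is 64 << 11 = 131072, so the consumer
 * is woken roughly every 128K of AUX data.
 */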

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular, order-0 GFP_KERNEL pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
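
/*
 * Example (illustrative): in this configuration pgoff 0 maps
 * rb->user_page and pgoff n maps rb->data_pages[n - 1], so an
 * nr_pages = 8 buffer occupies 9 mmap'ed pages in total.
 */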

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' (rather than '>=') accounts for the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' (rather than '<') accounts for the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}
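/*
 * Example (illustrative): with nr_pages = 8, aux_pgoff = 9 and
 * aux_nr_pages = 16, pgoff 0..8 resolve through __perf_mmap_to_page()
 * (user page plus data pages), pgoff 9..24 map rb->aux_pages[0..15], and
 * offsets above aux_pgoff + aux_nr_pages return NULL.
 */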