// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending_irq);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;

	preempt_disable();

	/*
	 * Avoid an explicit LOAD/STORE such that architectures with memops
	 * can use them.
	 */
	(*(volatile unsigned int *)&rb->nest)++;
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;
	unsigned long head;
	unsigned int nest;

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	nest = READ_ONCE(rb->nest);
	if (nest > 1) {
		WRITE_ONCE(rb->nest, nest - 1);
		goto out;
	}

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data, we do not
	 * store it at all.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	WRITE_ONCE(rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		WRITE_ONCE(rb->nest, 1);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
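
/*
 * For illustration only, not part of the kernel build: a minimal sketch of
 * the matching userspace consumer for the ordering diagram above, assuming
 * the ring buffer has been mmap()ed, "meta" points at the
 * struct perf_event_mmap_page, and "base"/"mask" describe the data area.
 * The acquire load of ->data_head provides (C) and the release store to
 * ->data_tail provides (D); process_record() is a made-up name and
 * record wrap-around handling is omitted.
 *
 *	__u64 head = __atomic_load_n(&meta->data_head, __ATOMIC_ACQUIRE);
 *	__u64 tail = meta->data_tail;
 *
 *	while (tail != head) {
 *		struct perf_event_header *hdr = base + (tail & mask);
 *
 *		process_record(hdr);		// LOAD $data
 *		tail += hdr->size;
 *	}
 *	__atomic_store_n(&meta->data_tail, tail, __ATOMIC_RELEASE);
 */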

static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
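
/*
 * Illustrative numbers (not from the source): with data_size = 16, head = 10
 * and tail = 2, CIRC_SPACE(head, tail, data_size) evaluates to
 * ((tail - (head + 1)) & (data_size - 1)) = 7, i.e. seven more bytes may be
 * written; one byte is always kept free so that a full buffer remains
 * distinguishable from an empty one.
 */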

static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_sample_data *data,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct perf_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages) {
			local_inc(&rb->lost);
			atomic64_inc(&event->lost_samples);
		}
		goto out;
	}

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	offset = local_read(&rb->head);
	do {
		head = offset;
		tail = READ_ONCE(rb->user_page->data_tail);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (!local_try_cmpxchg(&rb->head, &offset, head));

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		/* XXX mostly redundant; @data is already fully initialized */
		perf_event_header__init_id(&lost_event.header, data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	atomic64_inc(&event->lost_samples);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
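
/*
 * For illustration only, not part of this file: the usual in-kernel producer
 * pattern built on top of this helper. A minimal sketch, assuming the caller
 * already has a perf_event, a filled-in perf_sample_data, and a record laid
 * out as a perf_event_header ("header") followed by a payload
 * ("payload"/"payload_len" are assumed caller-provided):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, data, event, header.size))
 *		return;			// no space; accounted as a lost sample
 *
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, payload, payload_len);
 *	perf_output_end(&handle);	// publishes data_head, may wake consumers
 *
 * perf_output_begin() takes the RCU read lock and the nest count, so
 * perf_output_end() must be called for every successful begin.
 */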

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_sample_data *data,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_sample_data *data,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_sample_data *data,
		      struct perf_event *event, unsigned int size)
{

	return __perf_output_begin(handle, data, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	refcount_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;

	mutex_init(&rb->aux_mutex);
}
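
/*
 * Illustrative numbers (not from the source): for a 64-page data area with
 * 4 KiB pages, perf_data_size() is 256 KiB, so an unspecified watermark
 * defaults to 128 KiB; a wakeup is then generated roughly every 128 KiB of
 * produced data (see the rb->wakeup handling in __perf_output_begin()).
 */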

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct perf_buffer *rb;
	unsigned int nest;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!refcount_inc_not_zero(&rb->aux_refcount))
		goto err;

	nest = READ_ONCE(rb->aux_nest);
	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(nest))
		goto err_put;

	WRITE_ONCE(rb->aux_nest, nest + 1);

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
			WRITE_ONCE(rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
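
/*
 * For illustration only: how a hypothetical AUX-capable PMU driver might use
 * this from its pmu::start() callback. "handle" is assumed to be a per-CPU
 * struct perf_output_handle owned by the driver; my_pmu_start(), my_buf and
 * my_hw_enable_trace() are made-up names, and the real interaction depends
 * on the hardware.
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		struct my_buf *buf;
 *
 *		buf = perf_aux_output_begin(&handle, event);
 *		if (!buf)
 *			return;		// no AUX buffer or no space; stay stopped
 *
 *		// program the hardware to write into buf, honouring
 *		// handle.head and handle.size in non-overwrite mode
 *		my_hw_enable_trace(buf, handle.head, handle.size);
 *	}
 *
 * The returned pointer is the pmu's private data as set up by
 * pmu::setup_aux(); see rb_alloc_aux() below.
 */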

static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
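
/*
 * Illustrative numbers (not from the source): with aux_watermark = 64 KiB,
 * aux_wakeup = 64 KiB and aux_head advanced to 150 KiB, the difference
 * (86 KiB) exceeds the watermark, so a wakeup is due and aux_wakeup is
 * rounded down to 128 KiB, the last watermark boundary the head has crossed.
 */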

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct perf_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	/*
	 * Only send RECORD_AUX if we have something useful to communicate
	 *
	 * Note: the OVERWRITE records by themselves are not considered
	 * useful, as they don't communicate any *new* information,
	 * aside from the short-lived offset, that becomes history at
	 * the next event sched-in and therefore isn't useful.
	 * The userspace that needs to copy out AUX data in overwrite
	 * mode should know to use user_page::aux_head for the actual
	 * offset. So, from now on we don't output AUX records that
	 * have *only* OVERWRITE flag set.
	 */
	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = smp_processor_id();
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	WRITE_ONCE(rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
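
/*
 * For illustration only: the matching stop side for the sketch above
 * perf_aux_output_begin(). my_pmu_stop(), my_hw_disable_trace() and
 * my_hw_bytes_written() are hypothetical, and a real driver would also
 * handle PERF_EF_UPDATE as well as snapshot/overwrite details.
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		unsigned long written;
 *
 *		my_hw_disable_trace();		// make all AUX data visible (B)
 *		written = my_hw_bytes_written();
 *
 *		if (handle.event)
 *			perf_aux_output_end(&handle, written);
 *	}
 *
 * perf_aux_output_end() drops the AUX reference taken by
 * perf_aux_output_begin(), so it must be called exactly once per
 * successful begin.
 */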

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct perf_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);

/*
 * Copy out AUX data from an AUX handle.
 */
long perf_output_copy_aux(struct perf_output_handle *aux_handle,
			  struct perf_output_handle *handle,
			  unsigned long from, unsigned long to)
{
	struct perf_buffer *rb = aux_handle->rb;
	unsigned long tocopy, remainder, len = 0;
	void *addr;

	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;

	do {
		tocopy = PAGE_SIZE - offset_in_page(from);
		if (to > from)
			tocopy = min(tocopy, to - from);
		if (!tocopy)
			break;

		addr = rb->aux_pages[from >> PAGE_SHIFT];
		addr += offset_in_page(from);

		remainder = perf_output_copy(handle, addr, tocopy);
		if (remainder)
			return -EFAULT;

		len += tocopy;
		from += tocopy;
		from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	} while (to != from);

	return len;
}
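
/*
 * Illustrative numbers (not from the source): with a 4-page (16 KiB) AUX
 * area, from = 15 KiB and to = 1 KiB, the loop above copies 1 KiB from the
 * last page, wraps "from" back to 0, then copies the remaining 1 KiB from
 * the first page, returning a total length of 2 KiB.
 */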

#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
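
/*
 * For illustration only: how a hypothetical SG-capable PMU driver might
 * recover the allocation size when walking the pages handed to its
 * pmu::setup_aux() callback. "pages" and "i" are assumed from that callback.
 *
 *	struct page *p = virt_to_page(pages[i]);
 *	int order = 0;
 *
 *	if (PagePrivate(p))
 *		order = page_private(p);	// 2^order contiguous pages
 *
 *	// the next (1 << order) entries of pages[] belong to this chunk
 */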

static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct perf_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (!overwrite) {
		/*
		 * Watermark defaults to half the buffer, and so does the
		 * max_order, to aid PMU drivers in double buffering.
		 */
		if (!watermark)
			watermark = min_t(unsigned long,
					  U32_MAX,
					  (unsigned long)nr_pages << (PAGE_SHIFT - 1));

		/*
		 * Use aux_watermark as the basis for chunking to
		 * help PMU drivers honor the watermark.
		 */
		max_order = get_order(watermark);
	} else {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);
		watermark = 0;
	}

	/*
	 * kcalloc_node() is unable to allocate buffer if the size is larger
	 * than: PAGE_SIZE << MAX_ORDER; directly bail out in this case.
	 */
	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
		return -ENOMEM;
	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	refcount_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct perf_buffer *rb)
{
	if (refcount_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

static void perf_mmap_free_page(void *addr)
{
	struct page *page = virt_to_page(addr);

	page->mapping = NULL;
	__free_page(page);
}

struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	int i, node;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) > PAGE_SHIFT+MAX_ORDER)
		goto fail;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		perf_mmap_free_page(rb->data_pages[i]);

	perf_mmap_free_page(rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

void rb_free(struct perf_buffer *rb)
{
	int i;

	perf_mmap_free_page(rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page(rb->data_pages[i]);
	kfree(rb);
}

#else
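/*
 * CONFIG_PERF_USE_VMALLOC: back the whole buffer (user page plus data) with a
 * single vmalloc_user() allocation instead of individual order-0 pages. The
 * data area is then treated as one "page" of order ilog2(nr_pages), so
 * rb->nr_pages is 1 and page_order(rb) spans the entire data area.
 */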
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct perf_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct perf_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct perf_buffer *rb)
{
	schedule_work(&rb->work);
}

struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	void *all_buf;
	int node;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}