// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len : 5 bits\n");
	trace_seq_puts(s, "\ttime_delta : 27 bits\n");
	trace_seq_puts(s, "\tarray : 32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
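 *
 * (The swap itself is implemented later in this file with cmpxchg on the
 *  page list pointers; the longer "lockless" algorithm description above
 *  rb_list_head() covers the details.)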
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
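 *
 * A consumer-side usage sketch (illustrative only: process() is a made-up
 * callback and the surrounding variables are assumed to exist; the ring
 * buffer calls are the real API):
 *
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	if (event)
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));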
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		write;		/* index for next write */
	unsigned	read;		/* index for next read */
	local_t		entries;	/* entries on this page */
	unsigned long	real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
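 *
 * A sketch of the encoding (illustrative value, not taken from the code):
 *
 *	write = (updaters << 20) | index
 *
 * e.g. a raw value of 0x00300015 means three updaters have started on the
 * page and the write index is 0x15. RB_WRITE_MASK below extracts the index
 * and RB_WRITE_INTCNT is the increment used for one updater.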
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work		work;
	wait_queue_head_t	waiters;
	wait_queue_head_t	full_waiters;
	bool			waiters_pending;
	bool			full_waiters_pending;
	bool			wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int			cpu;
	atomic_t		record_disabled;
	atomic_t		resize_disabled;
	struct trace_buffer	*buffer;
	raw_spinlock_t		reader_lock;	/* serialize readers */
	arch_spinlock_t		lock;
	struct lock_class_key	lock_key;
	struct buffer_data_page	*free_page;
	unsigned long		nr_pages;
	unsigned int		current_context;
	struct list_head	*pages;
	struct buffer_page	*head_page;	/* read from head */
	struct buffer_page	*tail_page;	/* write to tail */
	struct buffer_page	*commit_page;	/* committed pages */
	struct buffer_page	*reader_page;
	unsigned long		lost_events;
	unsigned long		last_overrun;
	unsigned long		nest;
	local_t			entries_bytes;
	local_t			entries;
	local_t			overrun;
	local_t			commit_overrun;
	local_t			dropped_events;
	local_t			committing;
	local_t			commits;
	local_t			pages_touched;
	local_t			pages_read;
	long			last_pages_touch;
	size_t			shortest_full;
	unsigned long		read;
	unsigned long		read_bytes;
	rb_time_t		write_stamp;
	rb_time_t		before_stamp;
	u64			event_stamp[MAX_NEST];
	u64			read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long			nr_pages_to_update;
	struct list_head	new_pages;	/* new pages to add */
	struct work_struct	update_pages_work;
	struct completion	update_done;

	struct rb_irq_work	irq_work;
};

struct trace_buffer {
	unsigned		flags;
	int			cpus;
	atomic_t		record_disabled;
	cpumask_var_t		cpumask;

	struct lock_class_key	*reader_lock_key;

	struct mutex		mutex;

	struct ring_buffer_per_cpu **buffers;

	struct hlist_node	node;
	u64			(*clock)(void);

	struct rb_irq_work	irq_work;
	bool			time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 still uses local64_t).
 *
 * For the ring buffer, the 64 bit operations required for the time stamp
 * are the following:
 *
 * - Only need 59 bits (uses 60 to make it even).
 *
 * - Reads may fail if it interrupted a modification of the time stamp.
 *   It will succeed if it did not interrupt another write even if
 *   the read itself is interrupted by a write.
 *   It returns whether it was successful or not.
 *
 * - Writes always succeed and will overwrite other writes and writes
 *   that were done by events interrupting the current write.
 *
 * - A write followed by a read of the same time stamp will always succeed,
 *   but may not contain the same value.
 *
 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *   Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 * (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half holds a 2 bit counter (0-3).
 * Each update will increment this counter by one.
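 *
 * A sketch of the layout (symbolic, not literal values):
 *
 *	ts (60 bits)	= TOP << 30 | BOTTOM
 *	t->top		= (cnt & 3) << 30 | TOP
 *	t->bottom	= (cnt & 3) << 30 | BOTTOM
 *
 * where cnt is the value of the update counter when the write happened.
 *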
 * When reading the top and bottom, if the two counter bits match then the
 * top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}

static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	*ret = rb_time_val(top, bottom);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom;

	rb_time_split(val, &top, &bottom);

	/*
	 * Writes always succeed with a valid number even if it gets
	 * interrupted.
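	 *
	 * (If an interrupting write bumps t->cnt while this write is in
	 *  progress, the loop below runs again with the new count, so the
	 *  last writer always leaves a consistent top/bottom pair behind.)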
	 */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret;

	ret = local_cmpxchg(l, expect, set);
	return ret == expect;
}

static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom;
	unsigned long cnt2, top2, bottom2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	u64 val;
	val = local64_cmpxchg(&t->time, expect, set);
	return val == expect;
}
#endif

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif


static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer.
 And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise current time is returned, but neither of the last two
 * cases should really ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
		return rb_event_time_stamp(event);

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}


	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
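		 *
		 * (Put differently: a wakeup can only be missed if a writer
		 * checks the flag before it is set; the loop below re-checks
		 * whether the buffer has data before calling schedule(), which
		 * closes that window.)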
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			size_t nr_pages;
			size_t dirty;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			nr_pages = cpu_buffer->nr_pages;
			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full < full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (!pagebusy &&
			    (!nr_pages || (dirty * 100) > full * nr_pages))
				break;
		}

		schedule();
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so they only
 * need to worry about interrupts. Reads can happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next		bit 1	  bit 0
 *					-------	 -------
 * Normal page				0	  0
 * Points to head page			0	  1
 * New head page			1	  0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			       long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * A user thread may allocate too much while si_mem_available()
	 * reports there's enough memory, even though there is not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
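	 *
	 * (set_current_oom_origin() below marks this task as the preferred
	 *  OOM victim; the mark is cleared again once the loop completes or
	 *  the allocation is abandoned.)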
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
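 *
 * Frees the per-CPU buffers and all of their pages, as well as the
 * @buffer descriptor itself; the counterpart of __ring_buffer_alloc().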
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						   struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
			       list);
	tmp_iter_page = first_page;

	do {
		cond_resched();

		to_remove_page = tmp_iter_page;
		rb_inc_page(&tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
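	 *
	 * (If the cmpxchg in step 2 loses the race, the writer moved the head
	 * page while we were preparing the splice; we simply look up the new
	 * head page and try again.)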
1959 */ 1960 retries = 10; 1961 success = 0; 1962 while (retries--) { 1963 struct list_head *head_page, *prev_page, *r; 1964 struct list_head *last_page, *first_page; 1965 struct list_head *head_page_with_bit; 1966 1967 head_page = &rb_set_head_page(cpu_buffer)->list; 1968 if (!head_page) 1969 break; 1970 prev_page = head_page->prev; 1971 1972 first_page = pages->next; 1973 last_page = pages->prev; 1974 1975 head_page_with_bit = (struct list_head *) 1976 ((unsigned long)head_page | RB_PAGE_HEAD); 1977 1978 last_page->next = head_page_with_bit; 1979 first_page->prev = prev_page; 1980 1981 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 1982 1983 if (r == head_page_with_bit) { 1984 /* 1985 * yay, we replaced the page pointer to our new list, 1986 * now, we just have to update to head page's prev 1987 * pointer to point to end of list 1988 */ 1989 head_page->prev = last_page; 1990 success = 1; 1991 break; 1992 } 1993 } 1994 1995 if (success) 1996 INIT_LIST_HEAD(pages); 1997 /* 1998 * If we weren't successful in adding in new pages, warn and stop 1999 * tracing 2000 */ 2001 RB_WARN_ON(cpu_buffer, !success); 2002 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 2003 2004 /* free pages if they weren't inserted */ 2005 if (!success) { 2006 struct buffer_page *bpage, *tmp; 2007 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2008 list) { 2009 list_del_init(&bpage->list); 2010 free_buffer_page(bpage); 2011 } 2012 } 2013 return success; 2014 } 2015 2016 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2017 { 2018 int success; 2019 2020 if (cpu_buffer->nr_pages_to_update > 0) 2021 success = rb_insert_pages(cpu_buffer); 2022 else 2023 success = rb_remove_pages(cpu_buffer, 2024 -cpu_buffer->nr_pages_to_update); 2025 2026 if (success) 2027 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2028 } 2029 2030 static void update_pages_handler(struct work_struct *work) 2031 { 2032 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2033 struct ring_buffer_per_cpu, update_pages_work); 2034 rb_update_pages(cpu_buffer); 2035 complete(&cpu_buffer->update_done); 2036 } 2037 2038 /** 2039 * ring_buffer_resize - resize the ring buffer 2040 * @buffer: the buffer to resize. 2041 * @size: the new size. 2042 * @cpu_id: the cpu buffer to resize 2043 * 2044 * Minimum size is 2 * BUF_PAGE_SIZE. 2045 * 2046 * Returns 0 on success and < 0 on failure. 2047 */ 2048 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2049 int cpu_id) 2050 { 2051 struct ring_buffer_per_cpu *cpu_buffer; 2052 unsigned long nr_pages; 2053 int cpu, err; 2054 2055 /* 2056 * Always succeed at resizing a non-existent buffer: 2057 */ 2058 if (!buffer) 2059 return 0; 2060 2061 /* Make sure the requested buffer exists */ 2062 if (cpu_id != RING_BUFFER_ALL_CPUS && 2063 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2064 return 0; 2065 2066 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 2067 2068 /* we need a minimum of two pages */ 2069 if (nr_pages < 2) 2070 nr_pages = 2; 2071 2072 /* prevent another thread from changing buffer sizes */ 2073 mutex_lock(&buffer->mutex); 2074 2075 2076 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2077 /* 2078 * Don't succeed if resizing is disabled, as a reader might be 2079 * manipulating the ring buffer and is expecting a sane state while 2080 * this is true. 
2081 */ 2082 for_each_buffer_cpu(buffer, cpu) { 2083 cpu_buffer = buffer->buffers[cpu]; 2084 if (atomic_read(&cpu_buffer->resize_disabled)) { 2085 err = -EBUSY; 2086 goto out_err_unlock; 2087 } 2088 } 2089 2090 /* calculate the pages to update */ 2091 for_each_buffer_cpu(buffer, cpu) { 2092 cpu_buffer = buffer->buffers[cpu]; 2093 2094 cpu_buffer->nr_pages_to_update = nr_pages - 2095 cpu_buffer->nr_pages; 2096 /* 2097 * nothing more to do for removing pages or no update 2098 */ 2099 if (cpu_buffer->nr_pages_to_update <= 0) 2100 continue; 2101 /* 2102 * to add pages, make sure all new pages can be 2103 * allocated without receiving ENOMEM 2104 */ 2105 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2106 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2107 &cpu_buffer->new_pages)) { 2108 /* not enough memory for new pages */ 2109 err = -ENOMEM; 2110 goto out_err; 2111 } 2112 } 2113 2114 cpus_read_lock(); 2115 /* 2116 * Fire off all the required work handlers 2117 * We can't schedule on offline CPUs, but it's not necessary 2118 * since we can change their buffer sizes without any race. 2119 */ 2120 for_each_buffer_cpu(buffer, cpu) { 2121 cpu_buffer = buffer->buffers[cpu]; 2122 if (!cpu_buffer->nr_pages_to_update) 2123 continue; 2124 2125 /* Can't run something on an offline CPU. */ 2126 if (!cpu_online(cpu)) { 2127 rb_update_pages(cpu_buffer); 2128 cpu_buffer->nr_pages_to_update = 0; 2129 } else { 2130 schedule_work_on(cpu, 2131 &cpu_buffer->update_pages_work); 2132 } 2133 } 2134 2135 /* wait for all the updates to complete */ 2136 for_each_buffer_cpu(buffer, cpu) { 2137 cpu_buffer = buffer->buffers[cpu]; 2138 if (!cpu_buffer->nr_pages_to_update) 2139 continue; 2140 2141 if (cpu_online(cpu)) 2142 wait_for_completion(&cpu_buffer->update_done); 2143 cpu_buffer->nr_pages_to_update = 0; 2144 } 2145 2146 cpus_read_unlock(); 2147 } else { 2148 cpu_buffer = buffer->buffers[cpu_id]; 2149 2150 if (nr_pages == cpu_buffer->nr_pages) 2151 goto out; 2152 2153 /* 2154 * Don't succeed if resizing is disabled, as a reader might be 2155 * manipulating the ring buffer and is expecting a sane state while 2156 * this is true. 2157 */ 2158 if (atomic_read(&cpu_buffer->resize_disabled)) { 2159 err = -EBUSY; 2160 goto out_err_unlock; 2161 } 2162 2163 cpu_buffer->nr_pages_to_update = nr_pages - 2164 cpu_buffer->nr_pages; 2165 2166 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2167 if (cpu_buffer->nr_pages_to_update > 0 && 2168 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2169 &cpu_buffer->new_pages)) { 2170 err = -ENOMEM; 2171 goto out_err; 2172 } 2173 2174 cpus_read_lock(); 2175 2176 /* Can't run something on an offline CPU. */ 2177 if (!cpu_online(cpu_id)) 2178 rb_update_pages(cpu_buffer); 2179 else { 2180 schedule_work_on(cpu_id, 2181 &cpu_buffer->update_pages_work); 2182 wait_for_completion(&cpu_buffer->update_done); 2183 } 2184 2185 cpu_buffer->nr_pages_to_update = 0; 2186 cpus_read_unlock(); 2187 } 2188 2189 out: 2190 /* 2191 * The ring buffer resize can happen with the ring buffer 2192 * enabled, so that the update disturbs the tracing as little 2193 * as possible. But if the buffer is disabled, we do not need 2194 * to worry about that, and we can take the time to verify 2195 * that the buffer is not corrupt. 2196 */ 2197 if (atomic_read(&buffer->record_disabled)) { 2198 atomic_inc(&buffer->record_disabled); 2199 /* 2200 * Even though the buffer was disabled, we must make sure 2201 * that it is truly disabled before calling rb_check_pages. 
2202 * There could have been a race between checking 2203 * record_disable and incrementing it. 2204 */ 2205 synchronize_rcu(); 2206 for_each_buffer_cpu(buffer, cpu) { 2207 cpu_buffer = buffer->buffers[cpu]; 2208 rb_check_pages(cpu_buffer); 2209 } 2210 atomic_dec(&buffer->record_disabled); 2211 } 2212 2213 mutex_unlock(&buffer->mutex); 2214 return 0; 2215 2216 out_err: 2217 for_each_buffer_cpu(buffer, cpu) { 2218 struct buffer_page *bpage, *tmp; 2219 2220 cpu_buffer = buffer->buffers[cpu]; 2221 cpu_buffer->nr_pages_to_update = 0; 2222 2223 if (list_empty(&cpu_buffer->new_pages)) 2224 continue; 2225 2226 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2227 list) { 2228 list_del_init(&bpage->list); 2229 free_buffer_page(bpage); 2230 } 2231 } 2232 out_err_unlock: 2233 mutex_unlock(&buffer->mutex); 2234 return err; 2235 } 2236 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2237 2238 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2239 { 2240 mutex_lock(&buffer->mutex); 2241 if (val) 2242 buffer->flags |= RB_FL_OVERWRITE; 2243 else 2244 buffer->flags &= ~RB_FL_OVERWRITE; 2245 mutex_unlock(&buffer->mutex); 2246 } 2247 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2248 2249 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2250 { 2251 return bpage->page->data + index; 2252 } 2253 2254 static __always_inline struct ring_buffer_event * 2255 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2256 { 2257 return __rb_page_index(cpu_buffer->reader_page, 2258 cpu_buffer->reader_page->read); 2259 } 2260 2261 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2262 { 2263 return local_read(&bpage->page->commit); 2264 } 2265 2266 static struct ring_buffer_event * 2267 rb_iter_head_event(struct ring_buffer_iter *iter) 2268 { 2269 struct ring_buffer_event *event; 2270 struct buffer_page *iter_head_page = iter->head_page; 2271 unsigned long commit; 2272 unsigned length; 2273 2274 if (iter->head != iter->next_event) 2275 return iter->event; 2276 2277 /* 2278 * When the writer goes across pages, it issues a cmpxchg which 2279 * is a mb(), which will synchronize with the rmb here. 2280 * (see rb_tail_page_update() and __rb_reserve_next()) 2281 */ 2282 commit = rb_page_commit(iter_head_page); 2283 smp_rmb(); 2284 event = __rb_page_index(iter_head_page, iter->head); 2285 length = rb_event_length(event); 2286 2287 /* 2288 * READ_ONCE() doesn't work on functions and we don't want the 2289 * compiler doing any crazy optimizations with length. 2290 */ 2291 barrier(); 2292 2293 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2294 /* Writer corrupted the read? */ 2295 goto reset; 2296 2297 memcpy(iter->event, event, length); 2298 /* 2299 * If the page stamp is still the same after this rmb() then the 2300 * event was safely copied without the writer entering the page. 
2301 */ 2302 smp_rmb(); 2303 2304 /* Make sure the page didn't change since we read this */ 2305 if (iter->page_stamp != iter_head_page->page->time_stamp || 2306 commit > rb_page_commit(iter_head_page)) 2307 goto reset; 2308 2309 iter->next_event = iter->head + length; 2310 return iter->event; 2311 reset: 2312 /* Reset to the beginning */ 2313 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2314 iter->head = 0; 2315 iter->next_event = 0; 2316 iter->missed_events = 1; 2317 return NULL; 2318 } 2319 2320 /* Size is determined by what has been committed */ 2321 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2322 { 2323 return rb_page_commit(bpage); 2324 } 2325 2326 static __always_inline unsigned 2327 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2328 { 2329 return rb_page_commit(cpu_buffer->commit_page); 2330 } 2331 2332 static __always_inline unsigned 2333 rb_event_index(struct ring_buffer_event *event) 2334 { 2335 unsigned long addr = (unsigned long)event; 2336 2337 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2338 } 2339 2340 static void rb_inc_iter(struct ring_buffer_iter *iter) 2341 { 2342 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2343 2344 /* 2345 * The iterator could be on the reader page (it starts there). 2346 * But the head could have moved, since the reader was 2347 * found. Check for this case and assign the iterator 2348 * to the head page instead of next. 2349 */ 2350 if (iter->head_page == cpu_buffer->reader_page) 2351 iter->head_page = rb_set_head_page(cpu_buffer); 2352 else 2353 rb_inc_page(&iter->head_page); 2354 2355 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2356 iter->head = 0; 2357 iter->next_event = 0; 2358 } 2359 2360 /* 2361 * rb_handle_head_page - writer hit the head page 2362 * 2363 * Returns: +1 to retry page 2364 * 0 to continue 2365 * -1 on error 2366 */ 2367 static int 2368 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2369 struct buffer_page *tail_page, 2370 struct buffer_page *next_page) 2371 { 2372 struct buffer_page *new_head; 2373 int entries; 2374 int type; 2375 int ret; 2376 2377 entries = rb_page_entries(next_page); 2378 2379 /* 2380 * The hard part is here. We need to move the head 2381 * forward, and protect against both readers on 2382 * other CPUs and writers coming in via interrupts. 2383 */ 2384 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2385 RB_PAGE_HEAD); 2386 2387 /* 2388 * type can be one of four: 2389 * NORMAL - an interrupt already moved it for us 2390 * HEAD - we are the first to get here. 2391 * UPDATE - we are the interrupt interrupting 2392 * a current move. 2393 * MOVED - a reader on another CPU moved the next 2394 * pointer to its reader page. Give up 2395 * and try again. 2396 */ 2397 2398 switch (type) { 2399 case RB_PAGE_HEAD: 2400 /* 2401 * We changed the head to UPDATE, thus 2402 * it is our responsibility to update 2403 * the counters. 2404 */ 2405 local_add(entries, &cpu_buffer->overrun); 2406 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2407 2408 /* 2409 * The entries will be zeroed out when we move the 2410 * tail page. 2411 */ 2412 2413 /* still more to do */ 2414 break; 2415 2416 case RB_PAGE_UPDATE: 2417 /* 2418 * This is an interrupt that interrupt the 2419 * previous update. Still more to do. 2420 */ 2421 break; 2422 case RB_PAGE_NORMAL: 2423 /* 2424 * An interrupt came in before the update 2425 * and processed this for us. 2426 * Nothing left to do. 
2427 */ 2428 return 1; 2429 case RB_PAGE_MOVED: 2430 /* 2431 * The reader is on another CPU and just did 2432 * a swap with our next_page. 2433 * Try again. 2434 */ 2435 return 1; 2436 default: 2437 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2438 return -1; 2439 } 2440 2441 /* 2442 * Now that we are here, the old head pointer is 2443 * set to UPDATE. This will keep the reader from 2444 * swapping the head page with the reader page. 2445 * The reader (on another CPU) will spin till 2446 * we are finished. 2447 * 2448 * We just need to protect against interrupts 2449 * doing the job. We will set the next pointer 2450 * to HEAD. After that, we set the old pointer 2451 * to NORMAL, but only if it was HEAD before. 2452 * otherwise we are an interrupt, and only 2453 * want the outer most commit to reset it. 2454 */ 2455 new_head = next_page; 2456 rb_inc_page(&new_head); 2457 2458 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2459 RB_PAGE_NORMAL); 2460 2461 /* 2462 * Valid returns are: 2463 * HEAD - an interrupt came in and already set it. 2464 * NORMAL - One of two things: 2465 * 1) We really set it. 2466 * 2) A bunch of interrupts came in and moved 2467 * the page forward again. 2468 */ 2469 switch (ret) { 2470 case RB_PAGE_HEAD: 2471 case RB_PAGE_NORMAL: 2472 /* OK */ 2473 break; 2474 default: 2475 RB_WARN_ON(cpu_buffer, 1); 2476 return -1; 2477 } 2478 2479 /* 2480 * It is possible that an interrupt came in, 2481 * set the head up, then more interrupts came in 2482 * and moved it again. When we get back here, 2483 * the page would have been set to NORMAL but we 2484 * just set it back to HEAD. 2485 * 2486 * How do you detect this? Well, if that happened 2487 * the tail page would have moved. 2488 */ 2489 if (ret == RB_PAGE_NORMAL) { 2490 struct buffer_page *buffer_tail_page; 2491 2492 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 2493 /* 2494 * If the tail had moved passed next, then we need 2495 * to reset the pointer. 2496 */ 2497 if (buffer_tail_page != tail_page && 2498 buffer_tail_page != next_page) 2499 rb_head_page_set_normal(cpu_buffer, new_head, 2500 next_page, 2501 RB_PAGE_HEAD); 2502 } 2503 2504 /* 2505 * If this was the outer most commit (the one that 2506 * changed the original pointer from HEAD to UPDATE), 2507 * then it is up to us to reset it to NORMAL. 2508 */ 2509 if (type == RB_PAGE_HEAD) { 2510 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2511 tail_page, 2512 RB_PAGE_UPDATE); 2513 if (RB_WARN_ON(cpu_buffer, 2514 ret != RB_PAGE_UPDATE)) 2515 return -1; 2516 } 2517 2518 return 0; 2519 } 2520 2521 static inline void 2522 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2523 unsigned long tail, struct rb_event_info *info) 2524 { 2525 struct buffer_page *tail_page = info->tail_page; 2526 struct ring_buffer_event *event; 2527 unsigned long length = info->length; 2528 2529 /* 2530 * Only the event that crossed the page boundary 2531 * must fill the old tail_page with padding. 2532 */ 2533 if (tail >= BUF_PAGE_SIZE) { 2534 /* 2535 * If the page was filled, then we still need 2536 * to update the real_end. Reset it to zero 2537 * and the reader will ignore it. 2538 */ 2539 if (tail == BUF_PAGE_SIZE) 2540 tail_page->real_end = 0; 2541 2542 local_sub(length, &tail_page->write); 2543 return; 2544 } 2545 2546 event = __rb_page_index(tail_page, tail); 2547 2548 /* account for padding bytes */ 2549 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2550 2551 /* 2552 * Save the original length to the meta data. 
2553 * This will be used by the reader to add lost event 2554 * counter. 2555 */ 2556 tail_page->real_end = tail; 2557 2558 /* 2559 * If this event is bigger than the minimum size, then 2560 * we need to be careful that we don't subtract the 2561 * write counter enough to allow another writer to slip 2562 * in on this page. 2563 * We put in a discarded commit instead, to make sure 2564 * that this space is not used again. 2565 * 2566 * If we are less than the minimum size, we don't need to 2567 * worry about it. 2568 */ 2569 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2570 /* No room for any events */ 2571 2572 /* Mark the rest of the page with padding */ 2573 rb_event_set_padding(event); 2574 2575 /* Set the write back to the previous setting */ 2576 local_sub(length, &tail_page->write); 2577 return; 2578 } 2579 2580 /* Put in a discarded event */ 2581 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2582 event->type_len = RINGBUF_TYPE_PADDING; 2583 /* time delta must be non zero */ 2584 event->time_delta = 1; 2585 2586 /* Set write to end of buffer */ 2587 length = (tail + length) - BUF_PAGE_SIZE; 2588 local_sub(length, &tail_page->write); 2589 } 2590 2591 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2592 2593 /* 2594 * This is the slow path, force gcc not to inline it. 2595 */ 2596 static noinline struct ring_buffer_event * 2597 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2598 unsigned long tail, struct rb_event_info *info) 2599 { 2600 struct buffer_page *tail_page = info->tail_page; 2601 struct buffer_page *commit_page = cpu_buffer->commit_page; 2602 struct trace_buffer *buffer = cpu_buffer->buffer; 2603 struct buffer_page *next_page; 2604 int ret; 2605 2606 next_page = tail_page; 2607 2608 rb_inc_page(&next_page); 2609 2610 /* 2611 * If for some reason, we had an interrupt storm that made 2612 * it all the way around the buffer, bail, and warn 2613 * about it. 2614 */ 2615 if (unlikely(next_page == commit_page)) { 2616 local_inc(&cpu_buffer->commit_overrun); 2617 goto out_reset; 2618 } 2619 2620 /* 2621 * This is where the fun begins! 2622 * 2623 * We are fighting against races between a reader that 2624 * could be on another CPU trying to swap its reader 2625 * page with the buffer head. 2626 * 2627 * We are also fighting against interrupts coming in and 2628 * moving the head or tail on us as well. 2629 * 2630 * If the next page is the head page then we have filled 2631 * the buffer, unless the commit page is still on the 2632 * reader page. 2633 */ 2634 if (rb_is_head_page(next_page, &tail_page->list)) { 2635 2636 /* 2637 * If the commit is not on the reader page, then 2638 * move the header page. 2639 */ 2640 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2641 /* 2642 * If we are not in overwrite mode, 2643 * this is easy, just stop here. 2644 */ 2645 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2646 local_inc(&cpu_buffer->dropped_events); 2647 goto out_reset; 2648 } 2649 2650 ret = rb_handle_head_page(cpu_buffer, 2651 tail_page, 2652 next_page); 2653 if (ret < 0) 2654 goto out_reset; 2655 if (ret) 2656 goto out_again; 2657 } else { 2658 /* 2659 * We need to be careful here too. The 2660 * commit page could still be on the reader 2661 * page. We could have a small buffer, and 2662 * have filled up the buffer with events 2663 * from interrupts and such, and wrapped. 2664 * 2665 * Note, if the tail page is also on the 2666 * reader_page, we let it move out. 
2667 */ 2668 if (unlikely((cpu_buffer->commit_page != 2669 cpu_buffer->tail_page) && 2670 (cpu_buffer->commit_page == 2671 cpu_buffer->reader_page))) { 2672 local_inc(&cpu_buffer->commit_overrun); 2673 goto out_reset; 2674 } 2675 } 2676 } 2677 2678 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2679 2680 out_again: 2681 2682 rb_reset_tail(cpu_buffer, tail, info); 2683 2684 /* Commit what we have for now. */ 2685 rb_end_commit(cpu_buffer); 2686 /* rb_end_commit() decs committing */ 2687 local_inc(&cpu_buffer->committing); 2688 2689 /* fail and let the caller try again */ 2690 return ERR_PTR(-EAGAIN); 2691 2692 out_reset: 2693 /* reset write */ 2694 rb_reset_tail(cpu_buffer, tail, info); 2695 2696 return NULL; 2697 } 2698 2699 /* Slow path */ 2700 static struct ring_buffer_event * 2701 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2702 { 2703 if (abs) 2704 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2705 else 2706 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2707 2708 /* Not the first event on the page, or not delta? */ 2709 if (abs || rb_event_index(event)) { 2710 event->time_delta = delta & TS_MASK; 2711 event->array[0] = delta >> TS_SHIFT; 2712 } else { 2713 /* nope, just zero it */ 2714 event->time_delta = 0; 2715 event->array[0] = 0; 2716 } 2717 2718 return skip_time_extend(event); 2719 } 2720 2721 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2722 static inline bool sched_clock_stable(void) 2723 { 2724 return true; 2725 } 2726 #endif 2727 2728 static void 2729 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2730 struct rb_event_info *info) 2731 { 2732 u64 write_stamp; 2733 2734 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2735 (unsigned long long)info->delta, 2736 (unsigned long long)info->ts, 2737 (unsigned long long)info->before, 2738 (unsigned long long)info->after, 2739 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2740 sched_clock_stable() ? "" : 2741 "If you just came from a suspend/resume,\n" 2742 "please switch to the trace global clock:\n" 2743 " echo global > /sys/kernel/debug/tracing/trace_clock\n" 2744 "or add trace_clock=global to the kernel command line\n"); 2745 } 2746 2747 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2748 struct ring_buffer_event **event, 2749 struct rb_event_info *info, 2750 u64 *delta, 2751 unsigned int *length) 2752 { 2753 bool abs = info->add_timestamp & 2754 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2755 2756 if (unlikely(info->delta > (1ULL << 59))) { 2757 /* did the clock go backwards */ 2758 if (info->before == info->after && info->before > info->ts) { 2759 /* not interrupted */ 2760 static int once; 2761 2762 /* 2763 * This is possible with a recalibrating of the TSC. 2764 * Do not produce a call stack, but just report it. 2765 */ 2766 if (!once) { 2767 once++; 2768 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2769 info->before, info->ts); 2770 } 2771 } else 2772 rb_check_timestamp(cpu_buffer, info); 2773 if (!abs) 2774 info->delta = 0; 2775 } 2776 *event = rb_add_time_stamp(*event, info->delta, abs); 2777 *length -= RB_LEN_TIME_EXTEND; 2778 *delta = 0; 2779 } 2780 2781 /** 2782 * rb_update_event - update event type and data 2783 * @cpu_buffer: The per cpu buffer of the @event 2784 * @event: the event to update 2785 * @info: The info to update the @event with (contains length and delta) 2786 * 2787 * Update the type and data fields of the @event. 
The length 2788 * is the actual size that is written to the ring buffer, 2789 * and with this, we can determine what to place into the 2790 * data field. 2791 */ 2792 static void 2793 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2794 struct ring_buffer_event *event, 2795 struct rb_event_info *info) 2796 { 2797 unsigned length = info->length; 2798 u64 delta = info->delta; 2799 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2800 2801 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2802 cpu_buffer->event_stamp[nest] = info->ts; 2803 2804 /* 2805 * If we need to add a timestamp, then we 2806 * add it to the start of the reserved space. 2807 */ 2808 if (unlikely(info->add_timestamp)) 2809 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2810 2811 event->time_delta = delta; 2812 length -= RB_EVNT_HDR_SIZE; 2813 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2814 event->type_len = 0; 2815 event->array[0] = length; 2816 } else 2817 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2818 } 2819 2820 static unsigned rb_calculate_event_length(unsigned length) 2821 { 2822 struct ring_buffer_event event; /* Used only for sizeof array */ 2823 2824 /* zero length can cause confusions */ 2825 if (!length) 2826 length++; 2827 2828 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2829 length += sizeof(event.array[0]); 2830 2831 length += RB_EVNT_HDR_SIZE; 2832 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2833 2834 /* 2835 * In case the time delta is larger than the 27 bits for it 2836 * in the header, we need to add a timestamp. If another 2837 * event comes in when trying to discard this one to increase 2838 * the length, then the timestamp will be added in the allocated 2839 * space of this event. If length is bigger than the size needed 2840 * for the TIME_EXTEND, then padding has to be used. The events 2841 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2842 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2843 * As length is a multiple of 4, we only need to worry if it 2844 * is 12 (RB_LEN_TIME_EXTEND + 4). 
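 *
 * A worked example (assuming 4-byte alignment, i.e. no
 * RB_FORCE_8BYTE_ALIGNMENT): a request for 7 bytes of data becomes
 * 7 + RB_EVNT_HDR_SIZE(4) = 11, which aligns up to 12. As 12 is
 * exactly RB_LEN_TIME_EXTEND + RB_ALIGNMENT, it is bumped to 16
 * (RB_LEN_TIME_EXTEND + 8) so the leftover space can always hold a
 * padding event. A request for 3 bytes becomes 3 + 4 = 7, aligns up
 * to 8, and is returned as is.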
2845 */ 2846 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 2847 length += RB_ALIGNMENT; 2848 2849 return length; 2850 } 2851 2852 static u64 rb_time_delta(struct ring_buffer_event *event) 2853 { 2854 switch (event->type_len) { 2855 case RINGBUF_TYPE_PADDING: 2856 return 0; 2857 2858 case RINGBUF_TYPE_TIME_EXTEND: 2859 return rb_event_time_stamp(event); 2860 2861 case RINGBUF_TYPE_TIME_STAMP: 2862 return 0; 2863 2864 case RINGBUF_TYPE_DATA: 2865 return event->time_delta; 2866 default: 2867 return 0; 2868 } 2869 } 2870 2871 static inline int 2872 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2873 struct ring_buffer_event *event) 2874 { 2875 unsigned long new_index, old_index; 2876 struct buffer_page *bpage; 2877 unsigned long index; 2878 unsigned long addr; 2879 u64 write_stamp; 2880 u64 delta; 2881 2882 new_index = rb_event_index(event); 2883 old_index = new_index + rb_event_ts_length(event); 2884 addr = (unsigned long)event; 2885 addr &= PAGE_MASK; 2886 2887 bpage = READ_ONCE(cpu_buffer->tail_page); 2888 2889 delta = rb_time_delta(event); 2890 2891 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) 2892 return 0; 2893 2894 /* Make sure the write stamp is read before testing the location */ 2895 barrier(); 2896 2897 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 2898 unsigned long write_mask = 2899 local_read(&bpage->write) & ~RB_WRITE_MASK; 2900 unsigned long event_length = rb_event_length(event); 2901 2902 /* Something came in, can't discard */ 2903 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, 2904 write_stamp, write_stamp - delta)) 2905 return 0; 2906 2907 /* 2908 * It's possible that the event time delta is zero 2909 * (has the same time stamp as the previous event) 2910 * in which case write_stamp and before_stamp could 2911 * be the same. In such a case, force before_stamp 2912 * to be different than write_stamp. It doesn't 2913 * matter what it is, as long as its different. 2914 */ 2915 if (!delta) 2916 rb_time_set(&cpu_buffer->before_stamp, 0); 2917 2918 /* 2919 * If an event were to come in now, it would see that the 2920 * write_stamp and the before_stamp are different, and assume 2921 * that this event just added itself before updating 2922 * the write stamp. The interrupting event will fix the 2923 * write stamp for us, and use the before stamp as its delta. 2924 */ 2925 2926 /* 2927 * This is on the tail page. It is possible that 2928 * a write could come in and move the tail page 2929 * and write to the next page. That is fine 2930 * because we just shorten what is on this page. 2931 */ 2932 old_index += write_mask; 2933 new_index += write_mask; 2934 index = local_cmpxchg(&bpage->write, old_index, new_index); 2935 if (index == old_index) { 2936 /* update counters */ 2937 local_sub(event_length, &cpu_buffer->entries_bytes); 2938 return 1; 2939 } 2940 } 2941 2942 /* could not discard */ 2943 return 0; 2944 } 2945 2946 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 2947 { 2948 local_inc(&cpu_buffer->committing); 2949 local_inc(&cpu_buffer->commits); 2950 } 2951 2952 static __always_inline void 2953 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 2954 { 2955 unsigned long max_count; 2956 2957 /* 2958 * We only race with interrupts and NMIs on this CPU. 2959 * If we own the commit event, then we can commit 2960 * all others that interrupted us, since the interruptions 2961 * are in stack format (they finish before they come 2962 * back to us). 
This allows us to do a simple loop to 2963 * assign the commit to the tail. 2964 */ 2965 again: 2966 max_count = cpu_buffer->nr_pages * 100; 2967 2968 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 2969 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 2970 return; 2971 if (RB_WARN_ON(cpu_buffer, 2972 rb_is_reader_page(cpu_buffer->tail_page))) 2973 return; 2974 local_set(&cpu_buffer->commit_page->page->commit, 2975 rb_page_write(cpu_buffer->commit_page)); 2976 rb_inc_page(&cpu_buffer->commit_page); 2977 /* add barrier to keep gcc from optimizing too much */ 2978 barrier(); 2979 } 2980 while (rb_commit_index(cpu_buffer) != 2981 rb_page_write(cpu_buffer->commit_page)) { 2982 2983 local_set(&cpu_buffer->commit_page->page->commit, 2984 rb_page_write(cpu_buffer->commit_page)); 2985 RB_WARN_ON(cpu_buffer, 2986 local_read(&cpu_buffer->commit_page->page->commit) & 2987 ~RB_WRITE_MASK); 2988 barrier(); 2989 } 2990 2991 /* again, keep gcc from optimizing */ 2992 barrier(); 2993 2994 /* 2995 * If an interrupt came in just after the first while loop 2996 * and pushed the tail page forward, we will be left with 2997 * a dangling commit that will never go forward. 2998 */ 2999 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3000 goto again; 3001 } 3002 3003 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3004 { 3005 unsigned long commits; 3006 3007 if (RB_WARN_ON(cpu_buffer, 3008 !local_read(&cpu_buffer->committing))) 3009 return; 3010 3011 again: 3012 commits = local_read(&cpu_buffer->commits); 3013 /* synchronize with interrupts */ 3014 barrier(); 3015 if (local_read(&cpu_buffer->committing) == 1) 3016 rb_set_commit_to_write(cpu_buffer); 3017 3018 local_dec(&cpu_buffer->committing); 3019 3020 /* synchronize with interrupts */ 3021 barrier(); 3022 3023 /* 3024 * Need to account for interrupts coming in between the 3025 * updating of the commit page and the clearing of the 3026 * committing counter. 
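 *
 * If the commits counter changed after we sampled it and the
 * committing counter has meanwhile dropped to zero, an interrupt
 * slipped in an event whose commit is now our responsibility, so we
 * take the commit back and try again.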
3027 */ 3028 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3029 !local_read(&cpu_buffer->committing)) { 3030 local_inc(&cpu_buffer->committing); 3031 goto again; 3032 } 3033 } 3034 3035 static inline void rb_event_discard(struct ring_buffer_event *event) 3036 { 3037 if (extended_time(event)) 3038 event = skip_time_extend(event); 3039 3040 /* array[0] holds the actual length for the discarded event */ 3041 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3042 event->type_len = RINGBUF_TYPE_PADDING; 3043 /* time delta must be non zero */ 3044 if (!event->time_delta) 3045 event->time_delta = 1; 3046 } 3047 3048 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 3049 struct ring_buffer_event *event) 3050 { 3051 local_inc(&cpu_buffer->entries); 3052 rb_end_commit(cpu_buffer); 3053 } 3054 3055 static __always_inline void 3056 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3057 { 3058 size_t nr_pages; 3059 size_t dirty; 3060 size_t full; 3061 3062 if (buffer->irq_work.waiters_pending) { 3063 buffer->irq_work.waiters_pending = false; 3064 /* irq_work_queue() supplies it's own memory barriers */ 3065 irq_work_queue(&buffer->irq_work.work); 3066 } 3067 3068 if (cpu_buffer->irq_work.waiters_pending) { 3069 cpu_buffer->irq_work.waiters_pending = false; 3070 /* irq_work_queue() supplies it's own memory barriers */ 3071 irq_work_queue(&cpu_buffer->irq_work.work); 3072 } 3073 3074 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3075 return; 3076 3077 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3078 return; 3079 3080 if (!cpu_buffer->irq_work.full_waiters_pending) 3081 return; 3082 3083 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3084 3085 full = cpu_buffer->shortest_full; 3086 nr_pages = cpu_buffer->nr_pages; 3087 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu); 3088 if (full && nr_pages && (dirty * 100) <= full * nr_pages) 3089 return; 3090 3091 cpu_buffer->irq_work.wakeup_full = true; 3092 cpu_buffer->irq_work.full_waiters_pending = false; 3093 /* irq_work_queue() supplies it's own memory barriers */ 3094 irq_work_queue(&cpu_buffer->irq_work.work); 3095 } 3096 3097 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3098 # define do_ring_buffer_record_recursion() \ 3099 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3100 #else 3101 # define do_ring_buffer_record_recursion() do { } while (0) 3102 #endif 3103 3104 /* 3105 * The lock and unlock are done within a preempt disable section. 3106 * The current_context per_cpu variable can only be modified 3107 * by the current task between lock and unlock. But it can 3108 * be modified more than once via an interrupt. To pass this 3109 * information from the lock to the unlock without having to 3110 * access the 'in_interrupt()' functions again (which do show 3111 * a bit of overhead in something as critical as function tracing, 3112 * we use a bitmask trick. 3113 * 3114 * bit 1 = NMI context 3115 * bit 2 = IRQ context 3116 * bit 3 = SoftIRQ context 3117 * bit 4 = normal context. 3118 * 3119 * This works because this is the order of contexts that can 3120 * preempt other contexts. A SoftIRQ never preempts an IRQ 3121 * context. 3122 * 3123 * When the context is determined, the corresponding bit is 3124 * checked and set (if it was set, then a recursion of that context 3125 * happened). 3126 * 3127 * On unlock, we need to clear this bit. To do so, just subtract 3128 * 1 from the current_context and AND it to itself. 
3129 * 3130 * (binary) 3131 * 101 - 1 = 100 3132 * 101 & 100 = 100 (clearing bit zero) 3133 * 3134 * 1010 - 1 = 1001 3135 * 1010 & 1001 = 1000 (clearing bit 1) 3136 * 3137 * The least significant bit can be cleared this way, and it 3138 * just so happens that it is the same bit corresponding to 3139 * the current context. 3140 * 3141 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3142 * is set when a recursion is detected at the current context, and if 3143 * the TRANSITION bit is already set, it will fail the recursion. 3144 * This is needed because there's a lag between the changing of 3145 * interrupt context and updating the preempt count. In this case, 3146 * a false positive will be found. To handle this, one extra recursion 3147 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3148 * bit is already set, then it is considered a recursion and the function 3149 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3150 * 3151 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3152 * to be cleared. Even if it wasn't the context that set it. That is, 3153 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3154 * is called before preempt_count() is updated, since the check will 3155 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3156 * NMI then comes in, it will set the NMI bit, but when the NMI code 3157 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3158 * and leave the NMI bit set. But this is fine, because the interrupt 3159 * code that set the TRANSITION bit will then clear the NMI bit when it 3160 * calls trace_recursive_unlock(). If another NMI comes in, it will 3161 * set the TRANSITION bit and continue. 3162 * 3163 * Note: The TRANSITION bit only handles a single transition between context. 3164 */ 3165 3166 static __always_inline int 3167 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3168 { 3169 unsigned int val = cpu_buffer->current_context; 3170 unsigned long pc = preempt_count(); 3171 int bit; 3172 3173 if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) 3174 bit = RB_CTX_NORMAL; 3175 else 3176 bit = pc & NMI_MASK ? RB_CTX_NMI : 3177 pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ; 3178 3179 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3180 /* 3181 * It is possible that this was called by transitioning 3182 * between interrupt context, and preempt_count() has not 3183 * been updated yet. In this case, use the TRANSITION bit. 3184 */ 3185 bit = RB_CTX_TRANSITION; 3186 if (val & (1 << (bit + cpu_buffer->nest))) { 3187 do_ring_buffer_record_recursion(); 3188 return 1; 3189 } 3190 } 3191 3192 val |= (1 << (bit + cpu_buffer->nest)); 3193 cpu_buffer->current_context = val; 3194 3195 return 0; 3196 } 3197 3198 static __always_inline void 3199 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3200 { 3201 cpu_buffer->current_context &= 3202 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3203 } 3204 3205 /* The recursive locking above uses 5 bits */ 3206 #define NESTED_BITS 5 3207 3208 /** 3209 * ring_buffer_nest_start - Allow to trace while nested 3210 * @buffer: The ring buffer to modify 3211 * 3212 * The ring buffer has a safety mechanism to prevent recursion. 3213 * But there may be a case where a trace needs to be done while 3214 * tracing something else. 
In this case, calling this function 3215 * will allow this function to nest within a currently active 3216 * ring_buffer_lock_reserve(). 3217 * 3218 * Call this function before calling another ring_buffer_lock_reserve() and 3219 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 3220 */ 3221 void ring_buffer_nest_start(struct trace_buffer *buffer) 3222 { 3223 struct ring_buffer_per_cpu *cpu_buffer; 3224 int cpu; 3225 3226 /* Enabled by ring_buffer_nest_end() */ 3227 preempt_disable_notrace(); 3228 cpu = raw_smp_processor_id(); 3229 cpu_buffer = buffer->buffers[cpu]; 3230 /* This is the shift value for the above recursive locking */ 3231 cpu_buffer->nest += NESTED_BITS; 3232 } 3233 3234 /** 3235 * ring_buffer_nest_end - Allow to trace while nested 3236 * @buffer: The ring buffer to modify 3237 * 3238 * Must be called after ring_buffer_nest_start() and after the 3239 * ring_buffer_unlock_commit(). 3240 */ 3241 void ring_buffer_nest_end(struct trace_buffer *buffer) 3242 { 3243 struct ring_buffer_per_cpu *cpu_buffer; 3244 int cpu; 3245 3246 /* disabled by ring_buffer_nest_start() */ 3247 cpu = raw_smp_processor_id(); 3248 cpu_buffer = buffer->buffers[cpu]; 3249 /* This is the shift value for the above recursive locking */ 3250 cpu_buffer->nest -= NESTED_BITS; 3251 preempt_enable_notrace(); 3252 } 3253 3254 /** 3255 * ring_buffer_unlock_commit - commit a reserved 3256 * @buffer: The buffer to commit to 3257 * @event: The event pointer to commit. 3258 * 3259 * This commits the data to the ring buffer, and releases any locks held. 3260 * 3261 * Must be paired with ring_buffer_lock_reserve. 3262 */ 3263 int ring_buffer_unlock_commit(struct trace_buffer *buffer, 3264 struct ring_buffer_event *event) 3265 { 3266 struct ring_buffer_per_cpu *cpu_buffer; 3267 int cpu = raw_smp_processor_id(); 3268 3269 cpu_buffer = buffer->buffers[cpu]; 3270 3271 rb_commit(cpu_buffer, event); 3272 3273 rb_wakeups(buffer, cpu_buffer); 3274 3275 trace_recursive_unlock(cpu_buffer); 3276 3277 preempt_enable_notrace(); 3278 3279 return 0; 3280 } 3281 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3282 3283 /* Special value to validate all deltas on a page. 
*/ 3284 #define CHECK_FULL_PAGE 1L 3285 3286 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3287 static void dump_buffer_page(struct buffer_data_page *bpage, 3288 struct rb_event_info *info, 3289 unsigned long tail) 3290 { 3291 struct ring_buffer_event *event; 3292 u64 ts, delta; 3293 int e; 3294 3295 ts = bpage->time_stamp; 3296 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3297 3298 for (e = 0; e < tail; e += rb_event_length(event)) { 3299 3300 event = (struct ring_buffer_event *)(bpage->data + e); 3301 3302 switch (event->type_len) { 3303 3304 case RINGBUF_TYPE_TIME_EXTEND: 3305 delta = rb_event_time_stamp(event); 3306 ts += delta; 3307 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3308 break; 3309 3310 case RINGBUF_TYPE_TIME_STAMP: 3311 delta = rb_event_time_stamp(event); 3312 ts = delta; 3313 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3314 break; 3315 3316 case RINGBUF_TYPE_PADDING: 3317 ts += event->time_delta; 3318 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3319 break; 3320 3321 case RINGBUF_TYPE_DATA: 3322 ts += event->time_delta; 3323 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3324 break; 3325 3326 default: 3327 break; 3328 } 3329 } 3330 } 3331 3332 static DEFINE_PER_CPU(atomic_t, checking); 3333 static atomic_t ts_dump; 3334 3335 /* 3336 * Check if the current event time stamp matches the deltas on 3337 * the buffer page. 3338 */ 3339 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3340 struct rb_event_info *info, 3341 unsigned long tail) 3342 { 3343 struct ring_buffer_event *event; 3344 struct buffer_data_page *bpage; 3345 u64 ts, delta; 3346 bool full = false; 3347 int e; 3348 3349 bpage = info->tail_page->page; 3350 3351 if (tail == CHECK_FULL_PAGE) { 3352 full = true; 3353 tail = local_read(&bpage->commit); 3354 } else if (info->add_timestamp & 3355 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 3356 /* Ignore events with absolute time stamps */ 3357 return; 3358 } 3359 3360 /* 3361 * Do not check the first event (skip possible extends too). 3362 * Also do not check if previous events have not been committed. 
3363 */ 3364 if (tail <= 8 || tail > local_read(&bpage->commit)) 3365 return; 3366 3367 /* 3368 * If this interrupted another event, 3369 */ 3370 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 3371 goto out; 3372 3373 ts = bpage->time_stamp; 3374 3375 for (e = 0; e < tail; e += rb_event_length(event)) { 3376 3377 event = (struct ring_buffer_event *)(bpage->data + e); 3378 3379 switch (event->type_len) { 3380 3381 case RINGBUF_TYPE_TIME_EXTEND: 3382 delta = rb_event_time_stamp(event); 3383 ts += delta; 3384 break; 3385 3386 case RINGBUF_TYPE_TIME_STAMP: 3387 delta = rb_event_time_stamp(event); 3388 ts = delta; 3389 break; 3390 3391 case RINGBUF_TYPE_PADDING: 3392 if (event->time_delta == 1) 3393 break; 3394 fallthrough; 3395 case RINGBUF_TYPE_DATA: 3396 ts += event->time_delta; 3397 break; 3398 3399 default: 3400 RB_WARN_ON(cpu_buffer, 1); 3401 } 3402 } 3403 if ((full && ts > info->ts) || 3404 (!full && ts + info->delta != info->ts)) { 3405 /* If another report is happening, ignore this one */ 3406 if (atomic_inc_return(&ts_dump) != 1) { 3407 atomic_dec(&ts_dump); 3408 goto out; 3409 } 3410 atomic_inc(&cpu_buffer->record_disabled); 3411 /* There's some cases in boot up that this can happen */ 3412 WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 3413 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 3414 cpu_buffer->cpu, 3415 ts + info->delta, info->ts, info->delta, 3416 info->before, info->after, 3417 full ? " (full)" : ""); 3418 dump_buffer_page(bpage, info, tail); 3419 atomic_dec(&ts_dump); 3420 /* Do not re-enable checking */ 3421 return; 3422 } 3423 out: 3424 atomic_dec(this_cpu_ptr(&checking)); 3425 } 3426 #else 3427 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3428 struct rb_event_info *info, 3429 unsigned long tail) 3430 { 3431 } 3432 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 3433 3434 static struct ring_buffer_event * 3435 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3436 struct rb_event_info *info) 3437 { 3438 struct ring_buffer_event *event; 3439 struct buffer_page *tail_page; 3440 unsigned long tail, write, w; 3441 bool a_ok; 3442 bool b_ok; 3443 3444 /* Don't let the compiler play games with cpu_buffer->tail_page */ 3445 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3446 3447 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3448 barrier(); 3449 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3450 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3451 barrier(); 3452 info->ts = rb_time_stamp(cpu_buffer->buffer); 3453 3454 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3455 info->delta = info->ts; 3456 } else { 3457 /* 3458 * If interrupting an event time update, we may need an 3459 * absolute timestamp. 3460 * Don't bother if this is the start of a new page (w == 0). 
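 *
 * (info->before != info->after indicates that we interrupted another
 * event between its before_stamp (B) and write_stamp (D) updates
 * below, so a delta against write_stamp would be unreliable.)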
3461 */ 3462 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { 3463 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3464 info->length += RB_LEN_TIME_EXTEND; 3465 } else { 3466 info->delta = info->ts - info->after; 3467 if (unlikely(test_time_stamp(info->delta))) { 3468 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 3469 info->length += RB_LEN_TIME_EXTEND; 3470 } 3471 } 3472 } 3473 3474 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3475 3476 /*C*/ write = local_add_return(info->length, &tail_page->write); 3477 3478 /* set write to only the index of the write */ 3479 write &= RB_WRITE_MASK; 3480 3481 tail = write - info->length; 3482 3483 /* See if we shot pass the end of this buffer page */ 3484 if (unlikely(write > BUF_PAGE_SIZE)) { 3485 /* before and after may now different, fix it up*/ 3486 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3487 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3488 if (a_ok && b_ok && info->before != info->after) 3489 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, 3490 info->before, info->after); 3491 if (a_ok && b_ok) 3492 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3493 return rb_move_tail(cpu_buffer, tail, info); 3494 } 3495 3496 if (likely(tail == w)) { 3497 u64 save_before; 3498 bool s_ok; 3499 3500 /* Nothing interrupted us between A and C */ 3501 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3502 barrier(); 3503 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); 3504 RB_WARN_ON(cpu_buffer, !s_ok); 3505 if (likely(!(info->add_timestamp & 3506 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3507 /* This did not interrupt any time update */ 3508 info->delta = info->ts - info->after; 3509 else 3510 /* Just use full timestamp for interrupting event */ 3511 info->delta = info->ts; 3512 barrier(); 3513 check_buffer(cpu_buffer, info, tail); 3514 if (unlikely(info->ts != save_before)) { 3515 /* SLOW PATH - Interrupted between C and E */ 3516 3517 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3518 RB_WARN_ON(cpu_buffer, !a_ok); 3519 3520 /* Write stamp must only go forward */ 3521 if (save_before > info->after) { 3522 /* 3523 * We do not care about the result, only that 3524 * it gets updated atomically. 3525 */ 3526 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, 3527 info->after, save_before); 3528 } 3529 } 3530 } else { 3531 u64 ts; 3532 /* SLOW PATH - Interrupted between A and C */ 3533 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3534 /* Was interrupted before here, write_stamp must be valid */ 3535 RB_WARN_ON(cpu_buffer, !a_ok); 3536 ts = rb_time_stamp(cpu_buffer->buffer); 3537 barrier(); 3538 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3539 info->after < ts && 3540 rb_time_cmpxchg(&cpu_buffer->write_stamp, 3541 info->after, ts)) { 3542 /* Nothing came after this event between C and E */ 3543 info->delta = ts - info->after; 3544 } else { 3545 /* 3546 * Interrupted between C and E: 3547 * Lost the previous events time stamp. Just set the 3548 * delta to zero, and this will be the same time as 3549 * the event this event interrupted. And the events that 3550 * came after this will still be correct (as they would 3551 * have built their delta on the previous event. 3552 */ 3553 info->delta = 0; 3554 } 3555 info->ts = ts; 3556 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3557 } 3558 3559 /* 3560 * If this is the first commit on the page, then it has the same 3561 * timestamp as the page itself. 
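 * Its delta is then simply zero, and readers recover its time from the
 * page's time_stamp, which is set further below when tail == 0.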
3562 */ 3563 if (unlikely(!tail && !(info->add_timestamp & 3564 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3565 info->delta = 0; 3566 3567 /* We reserved something on the buffer */ 3568 3569 event = __rb_page_index(tail_page, tail); 3570 rb_update_event(cpu_buffer, event, info); 3571 3572 local_inc(&tail_page->entries); 3573 3574 /* 3575 * If this is the first commit on the page, then update 3576 * its timestamp. 3577 */ 3578 if (unlikely(!tail)) 3579 tail_page->page->time_stamp = info->ts; 3580 3581 /* account for these added bytes */ 3582 local_add(info->length, &cpu_buffer->entries_bytes); 3583 3584 return event; 3585 } 3586 3587 static __always_inline struct ring_buffer_event * 3588 rb_reserve_next_event(struct trace_buffer *buffer, 3589 struct ring_buffer_per_cpu *cpu_buffer, 3590 unsigned long length) 3591 { 3592 struct ring_buffer_event *event; 3593 struct rb_event_info info; 3594 int nr_loops = 0; 3595 int add_ts_default; 3596 3597 rb_start_commit(cpu_buffer); 3598 /* The commit page can not change after this */ 3599 3600 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3601 /* 3602 * Due to the ability to swap a cpu buffer from a buffer 3603 * it is possible it was swapped before we committed. 3604 * (committing stops a swap). We check for it here and 3605 * if it happened, we have to fail the write. 3606 */ 3607 barrier(); 3608 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 3609 local_dec(&cpu_buffer->committing); 3610 local_dec(&cpu_buffer->commits); 3611 return NULL; 3612 } 3613 #endif 3614 3615 info.length = rb_calculate_event_length(length); 3616 3617 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 3618 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 3619 info.length += RB_LEN_TIME_EXTEND; 3620 } else { 3621 add_ts_default = RB_ADD_STAMP_NONE; 3622 } 3623 3624 again: 3625 info.add_timestamp = add_ts_default; 3626 info.delta = 0; 3627 3628 /* 3629 * We allow for interrupts to reenter here and do a trace. 3630 * If one does, it will cause this original code to loop 3631 * back here. Even with heavy interrupts happening, this 3632 * should only happen a few times in a row. If this happens 3633 * 1000 times in a row, there must be either an interrupt 3634 * storm or we have something buggy. 3635 * Bail! 3636 */ 3637 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3638 goto out_fail; 3639 3640 event = __rb_reserve_next(cpu_buffer, &info); 3641 3642 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3643 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3644 info.length -= RB_LEN_TIME_EXTEND; 3645 goto again; 3646 } 3647 3648 if (likely(event)) 3649 return event; 3650 out_fail: 3651 rb_end_commit(cpu_buffer); 3652 return NULL; 3653 } 3654 3655 /** 3656 * ring_buffer_lock_reserve - reserve a part of the buffer 3657 * @buffer: the ring buffer to reserve from 3658 * @length: the length of the data to reserve (excluding event header) 3659 * 3660 * Returns a reserved event on the ring buffer to copy directly to. 3661 * The user of this interface will need to get the body to write into 3662 * and can use the ring_buffer_event_data() interface. 3663 * 3664 * The length is the length of the data needed, not the event length 3665 * which also includes the event header. 3666 * 3667 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3668 * If NULL is returned, then nothing has been allocated or locked. 
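 *
 * A minimal usage sketch (struct my_entry and its val field are only
 * illustrative, not part of this API):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->val = val;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}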
3669 */ 3670 struct ring_buffer_event * 3671 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3672 { 3673 struct ring_buffer_per_cpu *cpu_buffer; 3674 struct ring_buffer_event *event; 3675 int cpu; 3676 3677 /* If we are tracing schedule, we don't want to recurse */ 3678 preempt_disable_notrace(); 3679 3680 if (unlikely(atomic_read(&buffer->record_disabled))) 3681 goto out; 3682 3683 cpu = raw_smp_processor_id(); 3684 3685 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3686 goto out; 3687 3688 cpu_buffer = buffer->buffers[cpu]; 3689 3690 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3691 goto out; 3692 3693 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3694 goto out; 3695 3696 if (unlikely(trace_recursive_lock(cpu_buffer))) 3697 goto out; 3698 3699 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3700 if (!event) 3701 goto out_unlock; 3702 3703 return event; 3704 3705 out_unlock: 3706 trace_recursive_unlock(cpu_buffer); 3707 out: 3708 preempt_enable_notrace(); 3709 return NULL; 3710 } 3711 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3712 3713 /* 3714 * Decrement the entries to the page that an event is on. 3715 * The event does not even need to exist, only the pointer 3716 * to the page it is on. This may only be called before the commit 3717 * takes place. 3718 */ 3719 static inline void 3720 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3721 struct ring_buffer_event *event) 3722 { 3723 unsigned long addr = (unsigned long)event; 3724 struct buffer_page *bpage = cpu_buffer->commit_page; 3725 struct buffer_page *start; 3726 3727 addr &= PAGE_MASK; 3728 3729 /* Do the likely case first */ 3730 if (likely(bpage->page == (void *)addr)) { 3731 local_dec(&bpage->entries); 3732 return; 3733 } 3734 3735 /* 3736 * Because the commit page may be on the reader page we 3737 * start with the next page and check the end loop there. 3738 */ 3739 rb_inc_page(&bpage); 3740 start = bpage; 3741 do { 3742 if (bpage->page == (void *)addr) { 3743 local_dec(&bpage->entries); 3744 return; 3745 } 3746 rb_inc_page(&bpage); 3747 } while (bpage != start); 3748 3749 /* commit not part of this buffer?? */ 3750 RB_WARN_ON(cpu_buffer, 1); 3751 } 3752 3753 /** 3754 * ring_buffer_discard_commit - discard an event that has not been committed 3755 * @buffer: the ring buffer 3756 * @event: non committed event to discard 3757 * 3758 * Sometimes an event that is in the ring buffer needs to be ignored. 3759 * This function lets the user discard an event in the ring buffer 3760 * and then that event will not be read later. 3761 * 3762 * This function only works if it is called before the item has been 3763 * committed. It will try to free the event from the ring buffer 3764 * if another event has not been added behind it. 3765 * 3766 * If another event has been added behind it, it will set the event 3767 * up as discarded, and perform the commit. 3768 * 3769 * If this function is called, do not call ring_buffer_unlock_commit on 3770 * the event. 3771 */ 3772 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3773 struct ring_buffer_event *event) 3774 { 3775 struct ring_buffer_per_cpu *cpu_buffer; 3776 int cpu; 3777 3778 /* The event is discarded regardless */ 3779 rb_event_discard(event); 3780 3781 cpu = smp_processor_id(); 3782 cpu_buffer = buffer->buffers[cpu]; 3783 3784 /* 3785 * This must only be called if the event has not been 3786 * committed yet. Thus we can assume that preemption 3787 * is still disabled. 
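 *
 * rb_try_to_discard() below either reclaims the reserved space (if
 * nothing was written after this event) or leaves the event as the
 * padding that rb_event_discard() already turned it into; either way
 * the commit still has to be closed out through rb_end_commit().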
3788 */ 3789 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3790 3791 rb_decrement_entry(cpu_buffer, event); 3792 if (rb_try_to_discard(cpu_buffer, event)) 3793 goto out; 3794 3795 out: 3796 rb_end_commit(cpu_buffer); 3797 3798 trace_recursive_unlock(cpu_buffer); 3799 3800 preempt_enable_notrace(); 3801 3802 } 3803 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3804 3805 /** 3806 * ring_buffer_write - write data to the buffer without reserving 3807 * @buffer: The ring buffer to write to. 3808 * @length: The length of the data being written (excluding the event header) 3809 * @data: The data to write to the buffer. 3810 * 3811 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3812 * one function. If you already have the data to write to the buffer, it 3813 * may be easier to simply call this function. 3814 * 3815 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3816 * and not the length of the event which would hold the header. 3817 */ 3818 int ring_buffer_write(struct trace_buffer *buffer, 3819 unsigned long length, 3820 void *data) 3821 { 3822 struct ring_buffer_per_cpu *cpu_buffer; 3823 struct ring_buffer_event *event; 3824 void *body; 3825 int ret = -EBUSY; 3826 int cpu; 3827 3828 preempt_disable_notrace(); 3829 3830 if (atomic_read(&buffer->record_disabled)) 3831 goto out; 3832 3833 cpu = raw_smp_processor_id(); 3834 3835 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3836 goto out; 3837 3838 cpu_buffer = buffer->buffers[cpu]; 3839 3840 if (atomic_read(&cpu_buffer->record_disabled)) 3841 goto out; 3842 3843 if (length > BUF_MAX_DATA_SIZE) 3844 goto out; 3845 3846 if (unlikely(trace_recursive_lock(cpu_buffer))) 3847 goto out; 3848 3849 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3850 if (!event) 3851 goto out_unlock; 3852 3853 body = rb_event_data(event); 3854 3855 memcpy(body, data, length); 3856 3857 rb_commit(cpu_buffer, event); 3858 3859 rb_wakeups(buffer, cpu_buffer); 3860 3861 ret = 0; 3862 3863 out_unlock: 3864 trace_recursive_unlock(cpu_buffer); 3865 3866 out: 3867 preempt_enable_notrace(); 3868 3869 return ret; 3870 } 3871 EXPORT_SYMBOL_GPL(ring_buffer_write); 3872 3873 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3874 { 3875 struct buffer_page *reader = cpu_buffer->reader_page; 3876 struct buffer_page *head = rb_set_head_page(cpu_buffer); 3877 struct buffer_page *commit = cpu_buffer->commit_page; 3878 3879 /* In case of error, head will be NULL */ 3880 if (unlikely(!head)) 3881 return true; 3882 3883 /* Reader should exhaust content in reader page */ 3884 if (reader->read != rb_page_commit(reader)) 3885 return false; 3886 3887 /* 3888 * If writers are committing on the reader page, knowing all 3889 * committed content has been read, the ring buffer is empty. 3890 */ 3891 if (commit == reader) 3892 return true; 3893 3894 /* 3895 * If writers are committing on a page other than reader page 3896 * and head page, there should always be content to read. 3897 */ 3898 if (commit != head) 3899 return false; 3900 3901 /* 3902 * Writers are committing on the head page, we just need 3903 * to care about there're committed data, and the reader will 3904 * swap reader page with head page when it is to read data. 3905 */ 3906 return rb_page_commit(commit) == 0; 3907 } 3908 3909 /** 3910 * ring_buffer_record_disable - stop all writes into the buffer 3911 * @buffer: The ring buffer to stop writes to. 3912 * 3913 * This prevents all writes to the buffer. 
Any attempt to write 3914 * to the buffer after this will fail and return NULL. 3915 * 3916 * The caller should call synchronize_rcu() after this. 3917 */ 3918 void ring_buffer_record_disable(struct trace_buffer *buffer) 3919 { 3920 atomic_inc(&buffer->record_disabled); 3921 } 3922 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 3923 3924 /** 3925 * ring_buffer_record_enable - enable writes to the buffer 3926 * @buffer: The ring buffer to enable writes 3927 * 3928 * Note, multiple disables will need the same number of enables 3929 * to truly enable the writing (much like preempt_disable). 3930 */ 3931 void ring_buffer_record_enable(struct trace_buffer *buffer) 3932 { 3933 atomic_dec(&buffer->record_disabled); 3934 } 3935 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 3936 3937 /** 3938 * ring_buffer_record_off - stop all writes into the buffer 3939 * @buffer: The ring buffer to stop writes to. 3940 * 3941 * This prevents all writes to the buffer. Any attempt to write 3942 * to the buffer after this will fail and return NULL. 3943 * 3944 * This is different than ring_buffer_record_disable() as 3945 * it works like an on/off switch, where as the disable() version 3946 * must be paired with a enable(). 3947 */ 3948 void ring_buffer_record_off(struct trace_buffer *buffer) 3949 { 3950 unsigned int rd; 3951 unsigned int new_rd; 3952 3953 do { 3954 rd = atomic_read(&buffer->record_disabled); 3955 new_rd = rd | RB_BUFFER_OFF; 3956 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 3957 } 3958 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 3959 3960 /** 3961 * ring_buffer_record_on - restart writes into the buffer 3962 * @buffer: The ring buffer to start writes to. 3963 * 3964 * This enables all writes to the buffer that was disabled by 3965 * ring_buffer_record_off(). 3966 * 3967 * This is different than ring_buffer_record_enable() as 3968 * it works like an on/off switch, where as the enable() version 3969 * must be paired with a disable(). 3970 */ 3971 void ring_buffer_record_on(struct trace_buffer *buffer) 3972 { 3973 unsigned int rd; 3974 unsigned int new_rd; 3975 3976 do { 3977 rd = atomic_read(&buffer->record_disabled); 3978 new_rd = rd & ~RB_BUFFER_OFF; 3979 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 3980 } 3981 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 3982 3983 /** 3984 * ring_buffer_record_is_on - return true if the ring buffer can write 3985 * @buffer: The ring buffer to see if write is enabled 3986 * 3987 * Returns true if the ring buffer is in a state that it accepts writes. 3988 */ 3989 bool ring_buffer_record_is_on(struct trace_buffer *buffer) 3990 { 3991 return !atomic_read(&buffer->record_disabled); 3992 } 3993 3994 /** 3995 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 3996 * @buffer: The ring buffer to see if write is set enabled 3997 * 3998 * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 3999 * Note that this does NOT mean it is in a writable state. 4000 * 4001 * It may return true when the ring buffer has been disabled by 4002 * ring_buffer_record_disable(), as that is a temporary disabling of 4003 * the ring buffer. 4004 */ 4005 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) 4006 { 4007 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 4008 } 4009 4010 /** 4011 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 4012 * @buffer: The ring buffer to stop writes to. 
4013 * @cpu: The CPU buffer to stop 4014 * 4015 * This prevents all writes to the buffer. Any attempt to write 4016 * to the buffer after this will fail and return NULL. 4017 * 4018 * The caller should call synchronize_rcu() after this. 4019 */ 4020 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) 4021 { 4022 struct ring_buffer_per_cpu *cpu_buffer; 4023 4024 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4025 return; 4026 4027 cpu_buffer = buffer->buffers[cpu]; 4028 atomic_inc(&cpu_buffer->record_disabled); 4029 } 4030 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 4031 4032 /** 4033 * ring_buffer_record_enable_cpu - enable writes to the buffer 4034 * @buffer: The ring buffer to enable writes 4035 * @cpu: The CPU to enable. 4036 * 4037 * Note, multiple disables will need the same number of enables 4038 * to truly enable the writing (much like preempt_disable). 4039 */ 4040 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4041 { 4042 struct ring_buffer_per_cpu *cpu_buffer; 4043 4044 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4045 return; 4046 4047 cpu_buffer = buffer->buffers[cpu]; 4048 atomic_dec(&cpu_buffer->record_disabled); 4049 } 4050 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4051 4052 /* 4053 * The total entries in the ring buffer is the running counter 4054 * of entries entered into the ring buffer, minus the sum of 4055 * the entries read from the ring buffer and the number of 4056 * entries that were overwritten. 4057 */ 4058 static inline unsigned long 4059 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4060 { 4061 return local_read(&cpu_buffer->entries) - 4062 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4063 } 4064 4065 /** 4066 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4067 * @buffer: The ring buffer 4068 * @cpu: The per CPU buffer to read from. 4069 */ 4070 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4071 { 4072 unsigned long flags; 4073 struct ring_buffer_per_cpu *cpu_buffer; 4074 struct buffer_page *bpage; 4075 u64 ret = 0; 4076 4077 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4078 return 0; 4079 4080 cpu_buffer = buffer->buffers[cpu]; 4081 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4082 /* 4083 * if the tail is on reader_page, oldest time stamp is on the reader 4084 * page 4085 */ 4086 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4087 bpage = cpu_buffer->reader_page; 4088 else 4089 bpage = rb_set_head_page(cpu_buffer); 4090 if (bpage) 4091 ret = bpage->page->time_stamp; 4092 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4093 4094 return ret; 4095 } 4096 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4097 4098 /** 4099 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4100 * @buffer: The ring buffer 4101 * @cpu: The per CPU buffer to read from. 4102 */ 4103 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4104 { 4105 struct ring_buffer_per_cpu *cpu_buffer; 4106 unsigned long ret; 4107 4108 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4109 return 0; 4110 4111 cpu_buffer = buffer->buffers[cpu]; 4112 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4113 4114 return ret; 4115 } 4116 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4117 4118 /** 4119 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4120 * @buffer: The ring buffer 4121 * @cpu: The per CPU buffer to get the entries from. 
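 *
 * (This is the running count of events written to that CPU buffer minus the
 * events already read and the events lost to overwrite; see
 * rb_num_of_entries().)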
4122 */ 4123 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4124 { 4125 struct ring_buffer_per_cpu *cpu_buffer; 4126 4127 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4128 return 0; 4129 4130 cpu_buffer = buffer->buffers[cpu]; 4131 4132 return rb_num_of_entries(cpu_buffer); 4133 } 4134 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4135 4136 /** 4137 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4138 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 4139 * @buffer: The ring buffer 4140 * @cpu: The per CPU buffer to get the number of overruns from 4141 */ 4142 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4143 { 4144 struct ring_buffer_per_cpu *cpu_buffer; 4145 unsigned long ret; 4146 4147 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4148 return 0; 4149 4150 cpu_buffer = buffer->buffers[cpu]; 4151 ret = local_read(&cpu_buffer->overrun); 4152 4153 return ret; 4154 } 4155 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4156 4157 /** 4158 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4159 * commits failing due to the buffer wrapping around while there are uncommitted 4160 * events, such as during an interrupt storm. 4161 * @buffer: The ring buffer 4162 * @cpu: The per CPU buffer to get the number of overruns from 4163 */ 4164 unsigned long 4165 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4166 { 4167 struct ring_buffer_per_cpu *cpu_buffer; 4168 unsigned long ret; 4169 4170 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4171 return 0; 4172 4173 cpu_buffer = buffer->buffers[cpu]; 4174 ret = local_read(&cpu_buffer->commit_overrun); 4175 4176 return ret; 4177 } 4178 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4179 4180 /** 4181 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4182 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 
4183 * @buffer: The ring buffer 4184 * @cpu: The per CPU buffer to get the number of dropped events from 4185 */ 4186 unsigned long 4187 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4188 { 4189 struct ring_buffer_per_cpu *cpu_buffer; 4190 unsigned long ret; 4191 4192 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4193 return 0; 4194 4195 cpu_buffer = buffer->buffers[cpu]; 4196 ret = local_read(&cpu_buffer->dropped_events); 4197 4198 return ret; 4199 } 4200 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4201 4202 /** 4203 * ring_buffer_read_events_cpu - get the number of events successfully read 4204 * @buffer: The ring buffer 4205 * @cpu: The per CPU buffer to get the number of events read 4206 */ 4207 unsigned long 4208 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4209 { 4210 struct ring_buffer_per_cpu *cpu_buffer; 4211 4212 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4213 return 0; 4214 4215 cpu_buffer = buffer->buffers[cpu]; 4216 return cpu_buffer->read; 4217 } 4218 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4219 4220 /** 4221 * ring_buffer_entries - get the number of entries in a buffer 4222 * @buffer: The ring buffer 4223 * 4224 * Returns the total number of entries in the ring buffer 4225 * (all CPU entries) 4226 */ 4227 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4228 { 4229 struct ring_buffer_per_cpu *cpu_buffer; 4230 unsigned long entries = 0; 4231 int cpu; 4232 4233 /* if you care about this being correct, lock the buffer */ 4234 for_each_buffer_cpu(buffer, cpu) { 4235 cpu_buffer = buffer->buffers[cpu]; 4236 entries += rb_num_of_entries(cpu_buffer); 4237 } 4238 4239 return entries; 4240 } 4241 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4242 4243 /** 4244 * ring_buffer_overruns - get the number of overruns in buffer 4245 * @buffer: The ring buffer 4246 * 4247 * Returns the total number of overruns in the ring buffer 4248 * (all CPU entries) 4249 */ 4250 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4251 { 4252 struct ring_buffer_per_cpu *cpu_buffer; 4253 unsigned long overruns = 0; 4254 int cpu; 4255 4256 /* if you care about this being correct, lock the buffer */ 4257 for_each_buffer_cpu(buffer, cpu) { 4258 cpu_buffer = buffer->buffers[cpu]; 4259 overruns += local_read(&cpu_buffer->overrun); 4260 } 4261 4262 return overruns; 4263 } 4264 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4265 4266 static void rb_iter_reset(struct ring_buffer_iter *iter) 4267 { 4268 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4269 4270 /* Iterator usage is expected to have record disabled */ 4271 iter->head_page = cpu_buffer->reader_page; 4272 iter->head = cpu_buffer->reader_page->read; 4273 iter->next_event = iter->head; 4274 4275 iter->cache_reader_page = iter->head_page; 4276 iter->cache_read = cpu_buffer->read; 4277 4278 if (iter->head) { 4279 iter->read_stamp = cpu_buffer->read_stamp; 4280 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4281 } else { 4282 iter->read_stamp = iter->head_page->page->time_stamp; 4283 iter->page_stamp = iter->read_stamp; 4284 } 4285 } 4286 4287 /** 4288 * ring_buffer_iter_reset - reset an iterator 4289 * @iter: The iterator to reset 4290 * 4291 * Resets the iterator, so that it will start from the beginning 4292 * again.
4293 */ 4294 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4295 { 4296 struct ring_buffer_per_cpu *cpu_buffer; 4297 unsigned long flags; 4298 4299 if (!iter) 4300 return; 4301 4302 cpu_buffer = iter->cpu_buffer; 4303 4304 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4305 rb_iter_reset(iter); 4306 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4307 } 4308 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4309 4310 /** 4311 * ring_buffer_iter_empty - check if an iterator has no more to read 4312 * @iter: The iterator to check 4313 */ 4314 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4315 { 4316 struct ring_buffer_per_cpu *cpu_buffer; 4317 struct buffer_page *reader; 4318 struct buffer_page *head_page; 4319 struct buffer_page *commit_page; 4320 struct buffer_page *curr_commit_page; 4321 unsigned commit; 4322 u64 curr_commit_ts; 4323 u64 commit_ts; 4324 4325 cpu_buffer = iter->cpu_buffer; 4326 reader = cpu_buffer->reader_page; 4327 head_page = cpu_buffer->head_page; 4328 commit_page = cpu_buffer->commit_page; 4329 commit_ts = commit_page->page->time_stamp; 4330 4331 /* 4332 * When the writer goes across pages, it issues a cmpxchg which 4333 * is a mb(), which will synchronize with the rmb here. 4334 * (see rb_tail_page_update()) 4335 */ 4336 smp_rmb(); 4337 commit = rb_page_commit(commit_page); 4338 /* We want to make sure that the commit page doesn't change */ 4339 smp_rmb(); 4340 4341 /* Make sure commit page didn't change */ 4342 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4343 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4344 4345 /* If the commit page changed, then there's more data */ 4346 if (curr_commit_page != commit_page || 4347 curr_commit_ts != commit_ts) 4348 return 0; 4349 4350 /* Still racy, as it may return a false positive, but that's OK */ 4351 return ((iter->head_page == commit_page && iter->head >= commit) || 4352 (iter->head_page == reader && commit_page == head_page && 4353 head_page->read == commit && 4354 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4355 } 4356 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4357 4358 static void 4359 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4360 struct ring_buffer_event *event) 4361 { 4362 u64 delta; 4363 4364 switch (event->type_len) { 4365 case RINGBUF_TYPE_PADDING: 4366 return; 4367 4368 case RINGBUF_TYPE_TIME_EXTEND: 4369 delta = rb_event_time_stamp(event); 4370 cpu_buffer->read_stamp += delta; 4371 return; 4372 4373 case RINGBUF_TYPE_TIME_STAMP: 4374 delta = rb_event_time_stamp(event); 4375 cpu_buffer->read_stamp = delta; 4376 return; 4377 4378 case RINGBUF_TYPE_DATA: 4379 cpu_buffer->read_stamp += event->time_delta; 4380 return; 4381 4382 default: 4383 RB_WARN_ON(cpu_buffer, 1); 4384 } 4385 return; 4386 } 4387 4388 static void 4389 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4390 struct ring_buffer_event *event) 4391 { 4392 u64 delta; 4393 4394 switch (event->type_len) { 4395 case RINGBUF_TYPE_PADDING: 4396 return; 4397 4398 case RINGBUF_TYPE_TIME_EXTEND: 4399 delta = rb_event_time_stamp(event); 4400 iter->read_stamp += delta; 4401 return; 4402 4403 case RINGBUF_TYPE_TIME_STAMP: 4404 delta = rb_event_time_stamp(event); 4405 iter->read_stamp = delta; 4406 return; 4407 4408 case RINGBUF_TYPE_DATA: 4409 iter->read_stamp += event->time_delta; 4410 return; 4411 4412 default: 4413 RB_WARN_ON(iter->cpu_buffer, 1); 4414 } 4415 return; 4416 } 4417 4418 static struct buffer_page * 4419 rb_get_reader_page(struct 
ring_buffer_per_cpu *cpu_buffer) 4420 { 4421 struct buffer_page *reader = NULL; 4422 unsigned long overwrite; 4423 unsigned long flags; 4424 int nr_loops = 0; 4425 int ret; 4426 4427 local_irq_save(flags); 4428 arch_spin_lock(&cpu_buffer->lock); 4429 4430 again: 4431 /* 4432 * This should normally only loop twice. But because the 4433 * start of the reader inserts an empty page, it causes 4434 * a case where we will loop three times. There should be no 4435 * reason to loop four times (that I know of). 4436 */ 4437 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 4438 reader = NULL; 4439 goto out; 4440 } 4441 4442 reader = cpu_buffer->reader_page; 4443 4444 /* If there's more to read, return this page */ 4445 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 4446 goto out; 4447 4448 /* Never should we have an index greater than the size */ 4449 if (RB_WARN_ON(cpu_buffer, 4450 cpu_buffer->reader_page->read > rb_page_size(reader))) 4451 goto out; 4452 4453 /* check if we caught up to the tail */ 4454 reader = NULL; 4455 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 4456 goto out; 4457 4458 /* Don't bother swapping if the ring buffer is empty */ 4459 if (rb_num_of_entries(cpu_buffer) == 0) 4460 goto out; 4461 4462 /* 4463 * Reset the reader page to size zero. 4464 */ 4465 local_set(&cpu_buffer->reader_page->write, 0); 4466 local_set(&cpu_buffer->reader_page->entries, 0); 4467 local_set(&cpu_buffer->reader_page->page->commit, 0); 4468 cpu_buffer->reader_page->real_end = 0; 4469 4470 spin: 4471 /* 4472 * Splice the empty reader page into the list around the head. 4473 */ 4474 reader = rb_set_head_page(cpu_buffer); 4475 if (!reader) 4476 goto out; 4477 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 4478 cpu_buffer->reader_page->list.prev = reader->list.prev; 4479 4480 /* 4481 * cpu_buffer->pages just needs to point to the buffer, it 4482 * has no specific buffer page to point to. Lets move it out 4483 * of our way so we don't accidentally swap it. 4484 */ 4485 cpu_buffer->pages = reader->list.prev; 4486 4487 /* The reader page will be pointing to the new head */ 4488 rb_set_list_to_head(&cpu_buffer->reader_page->list); 4489 4490 /* 4491 * We want to make sure we read the overruns after we set up our 4492 * pointers to the next object. The writer side does a 4493 * cmpxchg to cross pages which acts as the mb on the writer 4494 * side. Note, the reader will constantly fail the swap 4495 * while the writer is updating the pointers, so this 4496 * guarantees that the overwrite recorded here is the one we 4497 * want to compare with the last_overrun. 4498 */ 4499 smp_mb(); 4500 overwrite = local_read(&(cpu_buffer->overrun)); 4501 4502 /* 4503 * Here's the tricky part. 4504 * 4505 * We need to move the pointer past the header page. 4506 * But we can only do that if a writer is not currently 4507 * moving it. The page before the header page has the 4508 * flag bit '1' set if it is pointing to the page we want. 4509 * but if the writer is in the process of moving it 4510 * than it will be '2' or already moved '0'. 4511 */ 4512 4513 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 4514 4515 /* 4516 * If we did not convert it, then we must try again. 4517 */ 4518 if (!ret) 4519 goto spin; 4520 4521 /* 4522 * Yay! We succeeded in replacing the page. 4523 * 4524 * Now make the new head point back to the reader page. 
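 * The old head page is now isolated as the new reader page, so also advance
 * cpu_buffer->head_page past it; the writer carries on with the pages that
 * are still linked in the ring.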
4525 */ 4526 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4527 rb_inc_page(&cpu_buffer->head_page); 4528 4529 local_inc(&cpu_buffer->pages_read); 4530 4531 /* Finally update the reader page to the new head */ 4532 cpu_buffer->reader_page = reader; 4533 cpu_buffer->reader_page->read = 0; 4534 4535 if (overwrite != cpu_buffer->last_overrun) { 4536 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4537 cpu_buffer->last_overrun = overwrite; 4538 } 4539 4540 goto again; 4541 4542 out: 4543 /* Update the read_stamp on the first event */ 4544 if (reader && reader->read == 0) 4545 cpu_buffer->read_stamp = reader->page->time_stamp; 4546 4547 arch_spin_unlock(&cpu_buffer->lock); 4548 local_irq_restore(flags); 4549 4550 return reader; 4551 } 4552 4553 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4554 { 4555 struct ring_buffer_event *event; 4556 struct buffer_page *reader; 4557 unsigned length; 4558 4559 reader = rb_get_reader_page(cpu_buffer); 4560 4561 /* This function should not be called when buffer is empty */ 4562 if (RB_WARN_ON(cpu_buffer, !reader)) 4563 return; 4564 4565 event = rb_reader_event(cpu_buffer); 4566 4567 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4568 cpu_buffer->read++; 4569 4570 rb_update_read_stamp(cpu_buffer, event); 4571 4572 length = rb_event_length(event); 4573 cpu_buffer->reader_page->read += length; 4574 } 4575 4576 static void rb_advance_iter(struct ring_buffer_iter *iter) 4577 { 4578 struct ring_buffer_per_cpu *cpu_buffer; 4579 4580 cpu_buffer = iter->cpu_buffer; 4581 4582 /* If head == next_event then we need to jump to the next event */ 4583 if (iter->head == iter->next_event) { 4584 /* If the event gets overwritten again, there's nothing to do */ 4585 if (rb_iter_head_event(iter) == NULL) 4586 return; 4587 } 4588 4589 iter->head = iter->next_event; 4590 4591 /* 4592 * Check if we are at the end of the buffer. 4593 */ 4594 if (iter->next_event >= rb_page_size(iter->head_page)) { 4595 /* discarded commits can make the page empty */ 4596 if (iter->head_page == cpu_buffer->commit_page) 4597 return; 4598 rb_inc_iter(iter); 4599 return; 4600 } 4601 4602 rb_update_iter_read_stamp(iter, iter->event); 4603 } 4604 4605 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4606 { 4607 return cpu_buffer->lost_events; 4608 } 4609 4610 static struct ring_buffer_event * 4611 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4612 unsigned long *lost_events) 4613 { 4614 struct ring_buffer_event *event; 4615 struct buffer_page *reader; 4616 int nr_loops = 0; 4617 4618 if (ts) 4619 *ts = 0; 4620 again: 4621 /* 4622 * We repeat when a time extend is encountered. 4623 * Since the time extend is always attached to a data event, 4624 * we should never loop more than once. 4625 * (We never hit the following condition more than twice). 4626 */ 4627 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4628 return NULL; 4629 4630 reader = rb_get_reader_page(cpu_buffer); 4631 if (!reader) 4632 return NULL; 4633 4634 event = rb_reader_event(cpu_buffer); 4635 4636 switch (event->type_len) { 4637 case RINGBUF_TYPE_PADDING: 4638 if (rb_null_event(event)) 4639 RB_WARN_ON(cpu_buffer, 1); 4640 /* 4641 * Because the writer could be discarding every 4642 * event it creates (which would probably be bad) 4643 * if we were to go back to "again" then we may never 4644 * catch up, and will trigger the warn on, or lock 4645 * the box. Return the padding, and we will release 4646 * the current locks, and try again. 
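 * (The callers, ring_buffer_peek() and ring_buffer_consume(), see the
 * PADDING type, advance the reader past it, drop their locks and retry.)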
4647 */ 4648 return event; 4649 4650 case RINGBUF_TYPE_TIME_EXTEND: 4651 /* Internal data, OK to advance */ 4652 rb_advance_reader(cpu_buffer); 4653 goto again; 4654 4655 case RINGBUF_TYPE_TIME_STAMP: 4656 if (ts) { 4657 *ts = rb_event_time_stamp(event); 4658 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4659 cpu_buffer->cpu, ts); 4660 } 4661 /* Internal data, OK to advance */ 4662 rb_advance_reader(cpu_buffer); 4663 goto again; 4664 4665 case RINGBUF_TYPE_DATA: 4666 if (ts && !(*ts)) { 4667 *ts = cpu_buffer->read_stamp + event->time_delta; 4668 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4669 cpu_buffer->cpu, ts); 4670 } 4671 if (lost_events) 4672 *lost_events = rb_lost_events(cpu_buffer); 4673 return event; 4674 4675 default: 4676 RB_WARN_ON(cpu_buffer, 1); 4677 } 4678 4679 return NULL; 4680 } 4681 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4682 4683 static struct ring_buffer_event * 4684 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4685 { 4686 struct trace_buffer *buffer; 4687 struct ring_buffer_per_cpu *cpu_buffer; 4688 struct ring_buffer_event *event; 4689 int nr_loops = 0; 4690 4691 if (ts) 4692 *ts = 0; 4693 4694 cpu_buffer = iter->cpu_buffer; 4695 buffer = cpu_buffer->buffer; 4696 4697 /* 4698 * Check if someone performed a consuming read to 4699 * the buffer. A consuming read invalidates the iterator 4700 * and we need to reset the iterator in this case. 4701 */ 4702 if (unlikely(iter->cache_read != cpu_buffer->read || 4703 iter->cache_reader_page != cpu_buffer->reader_page)) 4704 rb_iter_reset(iter); 4705 4706 again: 4707 if (ring_buffer_iter_empty(iter)) 4708 return NULL; 4709 4710 /* 4711 * As the writer can mess with what the iterator is trying 4712 * to read, just give up if we fail to get an event after 4713 * three tries. The iterator is not as reliable when reading 4714 * the ring buffer with an active write as the consumer is. 4715 * Do not warn if the three failures is reached. 
4716 */ 4717 if (++nr_loops > 3) 4718 return NULL; 4719 4720 if (rb_per_cpu_empty(cpu_buffer)) 4721 return NULL; 4722 4723 if (iter->head >= rb_page_size(iter->head_page)) { 4724 rb_inc_iter(iter); 4725 goto again; 4726 } 4727 4728 event = rb_iter_head_event(iter); 4729 if (!event) 4730 goto again; 4731 4732 switch (event->type_len) { 4733 case RINGBUF_TYPE_PADDING: 4734 if (rb_null_event(event)) { 4735 rb_inc_iter(iter); 4736 goto again; 4737 } 4738 rb_advance_iter(iter); 4739 return event; 4740 4741 case RINGBUF_TYPE_TIME_EXTEND: 4742 /* Internal data, OK to advance */ 4743 rb_advance_iter(iter); 4744 goto again; 4745 4746 case RINGBUF_TYPE_TIME_STAMP: 4747 if (ts) { 4748 *ts = rb_event_time_stamp(event); 4749 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4750 cpu_buffer->cpu, ts); 4751 } 4752 /* Internal data, OK to advance */ 4753 rb_advance_iter(iter); 4754 goto again; 4755 4756 case RINGBUF_TYPE_DATA: 4757 if (ts && !(*ts)) { 4758 *ts = iter->read_stamp + event->time_delta; 4759 ring_buffer_normalize_time_stamp(buffer, 4760 cpu_buffer->cpu, ts); 4761 } 4762 return event; 4763 4764 default: 4765 RB_WARN_ON(cpu_buffer, 1); 4766 } 4767 4768 return NULL; 4769 } 4770 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 4771 4772 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) 4773 { 4774 if (likely(!in_nmi())) { 4775 raw_spin_lock(&cpu_buffer->reader_lock); 4776 return true; 4777 } 4778 4779 /* 4780 * If an NMI die dumps out the content of the ring buffer 4781 * trylock must be used to prevent a deadlock if the NMI 4782 * preempted a task that holds the ring buffer locks. If 4783 * we get the lock then all is fine, if not, then continue 4784 * to do the read, but this can corrupt the ring buffer, 4785 * so it must be permanently disabled from future writes. 4786 * Reading from NMI is a oneshot deal. 4787 */ 4788 if (raw_spin_trylock(&cpu_buffer->reader_lock)) 4789 return true; 4790 4791 /* Continue without locking, but disable the ring buffer */ 4792 atomic_inc(&cpu_buffer->record_disabled); 4793 return false; 4794 } 4795 4796 static inline void 4797 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) 4798 { 4799 if (likely(locked)) 4800 raw_spin_unlock(&cpu_buffer->reader_lock); 4801 return; 4802 } 4803 4804 /** 4805 * ring_buffer_peek - peek at the next event to be read 4806 * @buffer: The ring buffer to read 4807 * @cpu: The cpu to peak at 4808 * @ts: The timestamp counter of this event. 4809 * @lost_events: a variable to store if events were lost (may be NULL) 4810 * 4811 * This will return the event that will be read next, but does 4812 * not consume the data. 
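 *
 * for example (a minimal sketch; @buffer, @cpu and the local variables are
 * whatever the caller already has):
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		len = ring_buffer_event_length(event);
 * Since nothing is consumed, a later peek or ring_buffer_consume() will see
 * the same event.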
4813 */ 4814 struct ring_buffer_event * 4815 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, 4816 unsigned long *lost_events) 4817 { 4818 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4819 struct ring_buffer_event *event; 4820 unsigned long flags; 4821 bool dolock; 4822 4823 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4824 return NULL; 4825 4826 again: 4827 local_irq_save(flags); 4828 dolock = rb_reader_lock(cpu_buffer); 4829 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 4830 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4831 rb_advance_reader(cpu_buffer); 4832 rb_reader_unlock(cpu_buffer, dolock); 4833 local_irq_restore(flags); 4834 4835 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4836 goto again; 4837 4838 return event; 4839 } 4840 4841 /** ring_buffer_iter_dropped - report if there are dropped events 4842 * @iter: The ring buffer iterator 4843 * 4844 * Returns true if there was dropped events since the last peek. 4845 */ 4846 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter) 4847 { 4848 bool ret = iter->missed_events != 0; 4849 4850 iter->missed_events = 0; 4851 return ret; 4852 } 4853 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped); 4854 4855 /** 4856 * ring_buffer_iter_peek - peek at the next event to be read 4857 * @iter: The ring buffer iterator 4858 * @ts: The timestamp counter of this event. 4859 * 4860 * This will return the event that will be read next, but does 4861 * not increment the iterator. 4862 */ 4863 struct ring_buffer_event * 4864 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4865 { 4866 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4867 struct ring_buffer_event *event; 4868 unsigned long flags; 4869 4870 again: 4871 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4872 event = rb_iter_peek(iter, ts); 4873 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4874 4875 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4876 goto again; 4877 4878 return event; 4879 } 4880 4881 /** 4882 * ring_buffer_consume - return an event and consume it 4883 * @buffer: The ring buffer to get the next event from 4884 * @cpu: the cpu to read the buffer from 4885 * @ts: a variable to store the timestamp (may be NULL) 4886 * @lost_events: a variable to store if events were lost (may be NULL) 4887 * 4888 * Returns the next event in the ring buffer, and that event is consumed. 4889 * Meaning, that sequential reads will keep returning a different event, 4890 * and eventually empty the ring buffer if the producer is slower. 
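 *
 * for example, draining everything currently readable on one CPU (a minimal
 * sketch; process_event() stands in for whatever the caller does with the
 * event):
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(event, ts, lost);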
4891 */ 4892 struct ring_buffer_event * 4893 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 4894 unsigned long *lost_events) 4895 { 4896 struct ring_buffer_per_cpu *cpu_buffer; 4897 struct ring_buffer_event *event = NULL; 4898 unsigned long flags; 4899 bool dolock; 4900 4901 again: 4902 /* might be called in atomic */ 4903 preempt_disable(); 4904 4905 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4906 goto out; 4907 4908 cpu_buffer = buffer->buffers[cpu]; 4909 local_irq_save(flags); 4910 dolock = rb_reader_lock(cpu_buffer); 4911 4912 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 4913 if (event) { 4914 cpu_buffer->lost_events = 0; 4915 rb_advance_reader(cpu_buffer); 4916 } 4917 4918 rb_reader_unlock(cpu_buffer, dolock); 4919 local_irq_restore(flags); 4920 4921 out: 4922 preempt_enable(); 4923 4924 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4925 goto again; 4926 4927 return event; 4928 } 4929 EXPORT_SYMBOL_GPL(ring_buffer_consume); 4930 4931 /** 4932 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 4933 * @buffer: The ring buffer to read from 4934 * @cpu: The cpu buffer to iterate over 4935 * @flags: gfp flags to use for memory allocation 4936 * 4937 * This performs the initial preparations necessary to iterate 4938 * through the buffer. Memory is allocated, buffer recording 4939 * is disabled, and the iterator pointer is returned to the caller. 4940 * 4941 * Disabling buffer recording prevents the reading from being 4942 * corrupted. This is not a consuming read, so a producer is not 4943 * expected. 4944 * 4945 * After a sequence of ring_buffer_read_prepare calls, the user is 4946 * expected to make at least one call to ring_buffer_read_prepare_sync. 4947 * Afterwards, ring_buffer_read_start is invoked to get things going 4948 * for real. 4949 * 4950 * This overall must be paired with ring_buffer_read_finish. 4951 */ 4952 struct ring_buffer_iter * 4953 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 4954 { 4955 struct ring_buffer_per_cpu *cpu_buffer; 4956 struct ring_buffer_iter *iter; 4957 4958 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4959 return NULL; 4960 4961 iter = kzalloc(sizeof(*iter), flags); 4962 if (!iter) 4963 return NULL; 4964 4965 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); 4966 if (!iter->event) { 4967 kfree(iter); 4968 return NULL; 4969 } 4970 4971 cpu_buffer = buffer->buffers[cpu]; 4972 4973 iter->cpu_buffer = cpu_buffer; 4974 4975 atomic_inc(&cpu_buffer->resize_disabled); 4976 4977 return iter; 4978 } 4979 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 4980 4981 /** 4982 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 4983 * 4984 * All previously invoked ring_buffer_read_prepare calls to prepare 4985 * iterators will be synchronized. Afterwards, read_buffer_read_start 4986 * calls on those iterators are allowed. 4987 */ 4988 void 4989 ring_buffer_read_prepare_sync(void) 4990 { 4991 synchronize_rcu(); 4992 } 4993 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 4994 4995 /** 4996 * ring_buffer_read_start - start a non consuming read of the buffer 4997 * @iter: The iterator returned by ring_buffer_read_prepare 4998 * 4999 * This finalizes the startup of an iteration through the buffer. 5000 * The iterator comes from a call to ring_buffer_read_prepare and 5001 * an intervening ring_buffer_read_prepare_sync must have been 5002 * performed. 5003 * 5004 * Must be paired with ring_buffer_read_finish. 
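 *
 * for example, the full non consuming sequence (a minimal sketch;
 * process_event() is a placeholder and NULL checks are omitted):
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);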
5005 */ 5006 void 5007 ring_buffer_read_start(struct ring_buffer_iter *iter) 5008 { 5009 struct ring_buffer_per_cpu *cpu_buffer; 5010 unsigned long flags; 5011 5012 if (!iter) 5013 return; 5014 5015 cpu_buffer = iter->cpu_buffer; 5016 5017 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5018 arch_spin_lock(&cpu_buffer->lock); 5019 rb_iter_reset(iter); 5020 arch_spin_unlock(&cpu_buffer->lock); 5021 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5022 } 5023 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 5024 5025 /** 5026 * ring_buffer_read_finish - finish reading the iterator of the buffer 5027 * @iter: The iterator retrieved by ring_buffer_start 5028 * 5029 * This re-enables the recording to the buffer, and frees the 5030 * iterator. 5031 */ 5032 void 5033 ring_buffer_read_finish(struct ring_buffer_iter *iter) 5034 { 5035 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5036 unsigned long flags; 5037 5038 /* 5039 * Ring buffer is disabled from recording, here's a good place 5040 * to check the integrity of the ring buffer. 5041 * Must prevent readers from trying to read, as the check 5042 * clears the HEAD page and readers require it. 5043 */ 5044 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5045 rb_check_pages(cpu_buffer); 5046 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5047 5048 atomic_dec(&cpu_buffer->resize_disabled); 5049 kfree(iter->event); 5050 kfree(iter); 5051 } 5052 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 5053 5054 /** 5055 * ring_buffer_iter_advance - advance the iterator to the next location 5056 * @iter: The ring buffer iterator 5057 * 5058 * Move the location of the iterator such that the next read will 5059 * be the next location of the iterator. 5060 */ 5061 void ring_buffer_iter_advance(struct ring_buffer_iter *iter) 5062 { 5063 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5064 unsigned long flags; 5065 5066 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5067 5068 rb_advance_iter(iter); 5069 5070 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5071 } 5072 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance); 5073 5074 /** 5075 * ring_buffer_size - return the size of the ring buffer (in bytes) 5076 * @buffer: The ring buffer. 5077 * @cpu: The CPU to get ring buffer size from. 5078 */ 5079 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) 5080 { 5081 /* 5082 * Earlier, this method returned 5083 * BUF_PAGE_SIZE * buffer->nr_pages 5084 * Since the nr_pages field is now removed, we have converted this to 5085 * return the per cpu buffer value. 
5086 */ 5087 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5088 return 0; 5089 5090 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5091 } 5092 EXPORT_SYMBOL_GPL(ring_buffer_size); 5093 5094 static void 5095 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5096 { 5097 rb_head_page_deactivate(cpu_buffer); 5098 5099 cpu_buffer->head_page 5100 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5101 local_set(&cpu_buffer->head_page->write, 0); 5102 local_set(&cpu_buffer->head_page->entries, 0); 5103 local_set(&cpu_buffer->head_page->page->commit, 0); 5104 5105 cpu_buffer->head_page->read = 0; 5106 5107 cpu_buffer->tail_page = cpu_buffer->head_page; 5108 cpu_buffer->commit_page = cpu_buffer->head_page; 5109 5110 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5111 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5112 local_set(&cpu_buffer->reader_page->write, 0); 5113 local_set(&cpu_buffer->reader_page->entries, 0); 5114 local_set(&cpu_buffer->reader_page->page->commit, 0); 5115 cpu_buffer->reader_page->read = 0; 5116 5117 local_set(&cpu_buffer->entries_bytes, 0); 5118 local_set(&cpu_buffer->overrun, 0); 5119 local_set(&cpu_buffer->commit_overrun, 0); 5120 local_set(&cpu_buffer->dropped_events, 0); 5121 local_set(&cpu_buffer->entries, 0); 5122 local_set(&cpu_buffer->committing, 0); 5123 local_set(&cpu_buffer->commits, 0); 5124 local_set(&cpu_buffer->pages_touched, 0); 5125 local_set(&cpu_buffer->pages_read, 0); 5126 cpu_buffer->last_pages_touch = 0; 5127 cpu_buffer->shortest_full = 0; 5128 cpu_buffer->read = 0; 5129 cpu_buffer->read_bytes = 0; 5130 5131 rb_time_set(&cpu_buffer->write_stamp, 0); 5132 rb_time_set(&cpu_buffer->before_stamp, 0); 5133 5134 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5135 5136 cpu_buffer->lost_events = 0; 5137 cpu_buffer->last_overrun = 0; 5138 5139 rb_head_page_activate(cpu_buffer); 5140 } 5141 5142 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5143 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5144 { 5145 unsigned long flags; 5146 5147 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5148 5149 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5150 goto out; 5151 5152 arch_spin_lock(&cpu_buffer->lock); 5153 5154 rb_reset_cpu(cpu_buffer); 5155 5156 arch_spin_unlock(&cpu_buffer->lock); 5157 5158 out: 5159 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5160 } 5161 5162 /** 5163 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5164 * @buffer: The ring buffer to reset a per cpu buffer of 5165 * @cpu: The CPU buffer to be reset 5166 */ 5167 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5168 { 5169 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5170 5171 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5172 return; 5173 5174 /* prevent another thread from changing buffer sizes */ 5175 mutex_lock(&buffer->mutex); 5176 5177 atomic_inc(&cpu_buffer->resize_disabled); 5178 atomic_inc(&cpu_buffer->record_disabled); 5179 5180 /* Make sure all commits have finished */ 5181 synchronize_rcu(); 5182 5183 reset_disabled_cpu_buffer(cpu_buffer); 5184 5185 atomic_dec(&cpu_buffer->record_disabled); 5186 atomic_dec(&cpu_buffer->resize_disabled); 5187 5188 mutex_unlock(&buffer->mutex); 5189 } 5190 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5191 5192 /** 5193 * ring_buffer_reset_online_cpus - reset a ring buffer per cpu buffer of all online cpus 5194 * @buffer: The ring buffer to reset a per cpu buffer of 5195 * 5196 */
5197 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5198 { 5199 struct ring_buffer_per_cpu *cpu_buffer; 5200 int cpu; 5201 5202 /* prevent another thread from changing buffer sizes */ 5203 mutex_lock(&buffer->mutex); 5204 5205 for_each_online_buffer_cpu(buffer, cpu) { 5206 cpu_buffer = buffer->buffers[cpu]; 5207 5208 atomic_inc(&cpu_buffer->resize_disabled); 5209 atomic_inc(&cpu_buffer->record_disabled); 5210 } 5211 5212 /* Make sure all commits have finished */ 5213 synchronize_rcu(); 5214 5215 for_each_online_buffer_cpu(buffer, cpu) { 5216 cpu_buffer = buffer->buffers[cpu]; 5217 5218 reset_disabled_cpu_buffer(cpu_buffer); 5219 5220 atomic_dec(&cpu_buffer->record_disabled); 5221 atomic_dec(&cpu_buffer->resize_disabled); 5222 } 5223 5224 mutex_unlock(&buffer->mutex); 5225 } 5226 5227 /** 5228 * ring_buffer_reset - reset a ring buffer 5229 * @buffer: The ring buffer to reset all cpu buffers 5230 */ 5231 void ring_buffer_reset(struct trace_buffer *buffer) 5232 { 5233 struct ring_buffer_per_cpu *cpu_buffer; 5234 int cpu; 5235 5236 for_each_buffer_cpu(buffer, cpu) { 5237 cpu_buffer = buffer->buffers[cpu]; 5238 5239 atomic_inc(&cpu_buffer->resize_disabled); 5240 atomic_inc(&cpu_buffer->record_disabled); 5241 } 5242 5243 /* Make sure all commits have finished */ 5244 synchronize_rcu(); 5245 5246 for_each_buffer_cpu(buffer, cpu) { 5247 cpu_buffer = buffer->buffers[cpu]; 5248 5249 reset_disabled_cpu_buffer(cpu_buffer); 5250 5251 atomic_dec(&cpu_buffer->record_disabled); 5252 atomic_dec(&cpu_buffer->resize_disabled); 5253 } 5254 } 5255 EXPORT_SYMBOL_GPL(ring_buffer_reset); 5256 5257 /** 5258 * rind_buffer_empty - is the ring buffer empty? 5259 * @buffer: The ring buffer to test 5260 */ 5261 bool ring_buffer_empty(struct trace_buffer *buffer) 5262 { 5263 struct ring_buffer_per_cpu *cpu_buffer; 5264 unsigned long flags; 5265 bool dolock; 5266 int cpu; 5267 int ret; 5268 5269 /* yes this is racy, but if you don't like the race, lock the buffer */ 5270 for_each_buffer_cpu(buffer, cpu) { 5271 cpu_buffer = buffer->buffers[cpu]; 5272 local_irq_save(flags); 5273 dolock = rb_reader_lock(cpu_buffer); 5274 ret = rb_per_cpu_empty(cpu_buffer); 5275 rb_reader_unlock(cpu_buffer, dolock); 5276 local_irq_restore(flags); 5277 5278 if (!ret) 5279 return false; 5280 } 5281 5282 return true; 5283 } 5284 EXPORT_SYMBOL_GPL(ring_buffer_empty); 5285 5286 /** 5287 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 5288 * @buffer: The ring buffer 5289 * @cpu: The CPU buffer to test 5290 */ 5291 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5292 { 5293 struct ring_buffer_per_cpu *cpu_buffer; 5294 unsigned long flags; 5295 bool dolock; 5296 int ret; 5297 5298 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5299 return true; 5300 5301 cpu_buffer = buffer->buffers[cpu]; 5302 local_irq_save(flags); 5303 dolock = rb_reader_lock(cpu_buffer); 5304 ret = rb_per_cpu_empty(cpu_buffer); 5305 rb_reader_unlock(cpu_buffer, dolock); 5306 local_irq_restore(flags); 5307 5308 return ret; 5309 } 5310 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5311 5312 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5313 /** 5314 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5315 * @buffer_a: One buffer to swap with 5316 * @buffer_b: The other buffer to swap with 5317 * @cpu: the CPU of the buffers to swap 5318 * 5319 * This function is useful for tracers that want to take a "snapshot" 5320 * of a CPU buffer and has another back up buffer lying around. 
5321 * it is expected that the tracer handles the cpu buffer not being 5322 * used at the moment. 5323 */ 5324 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5325 struct trace_buffer *buffer_b, int cpu) 5326 { 5327 struct ring_buffer_per_cpu *cpu_buffer_a; 5328 struct ring_buffer_per_cpu *cpu_buffer_b; 5329 int ret = -EINVAL; 5330 5331 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5332 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5333 goto out; 5334 5335 cpu_buffer_a = buffer_a->buffers[cpu]; 5336 cpu_buffer_b = buffer_b->buffers[cpu]; 5337 5338 /* At least make sure the two buffers are somewhat the same */ 5339 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5340 goto out; 5341 5342 ret = -EAGAIN; 5343 5344 if (atomic_read(&buffer_a->record_disabled)) 5345 goto out; 5346 5347 if (atomic_read(&buffer_b->record_disabled)) 5348 goto out; 5349 5350 if (atomic_read(&cpu_buffer_a->record_disabled)) 5351 goto out; 5352 5353 if (atomic_read(&cpu_buffer_b->record_disabled)) 5354 goto out; 5355 5356 /* 5357 * We can't do a synchronize_rcu here because this 5358 * function can be called in atomic context. 5359 * Normally this will be called from the same CPU as cpu. 5360 * If not it's up to the caller to protect this. 5361 */ 5362 atomic_inc(&cpu_buffer_a->record_disabled); 5363 atomic_inc(&cpu_buffer_b->record_disabled); 5364 5365 ret = -EBUSY; 5366 if (local_read(&cpu_buffer_a->committing)) 5367 goto out_dec; 5368 if (local_read(&cpu_buffer_b->committing)) 5369 goto out_dec; 5370 5371 buffer_a->buffers[cpu] = cpu_buffer_b; 5372 buffer_b->buffers[cpu] = cpu_buffer_a; 5373 5374 cpu_buffer_b->buffer = buffer_a; 5375 cpu_buffer_a->buffer = buffer_b; 5376 5377 ret = 0; 5378 5379 out_dec: 5380 atomic_dec(&cpu_buffer_a->record_disabled); 5381 atomic_dec(&cpu_buffer_b->record_disabled); 5382 out: 5383 return ret; 5384 } 5385 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5386 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5387 5388 /** 5389 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5390 * @buffer: the buffer to allocate for. 5391 * @cpu: the cpu buffer to allocate. 5392 * 5393 * This function is used in conjunction with ring_buffer_read_page. 5394 * When reading a full page from the ring buffer, these functions 5395 * can be used to speed up the process. The calling function should 5396 * allocate a few pages first with this function. Then when it 5397 * needs to get pages from the ring buffer, it passes the result 5398 * of this function into ring_buffer_read_page, which will swap 5399 * the page that was allocated, with the read page of the buffer. 
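 *
 * When the caller is done with a page obtained here (including a page handed
 * back by ring_buffer_read_page() after a swap), it should be returned with
 * ring_buffer_free_read_page().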
5400 * 5401 * Returns: 5402 * The page allocated, or ERR_PTR 5403 */ 5404 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 5405 { 5406 struct ring_buffer_per_cpu *cpu_buffer; 5407 struct buffer_data_page *bpage = NULL; 5408 unsigned long flags; 5409 struct page *page; 5410 5411 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5412 return ERR_PTR(-ENODEV); 5413 5414 cpu_buffer = buffer->buffers[cpu]; 5415 local_irq_save(flags); 5416 arch_spin_lock(&cpu_buffer->lock); 5417 5418 if (cpu_buffer->free_page) { 5419 bpage = cpu_buffer->free_page; 5420 cpu_buffer->free_page = NULL; 5421 } 5422 5423 arch_spin_unlock(&cpu_buffer->lock); 5424 local_irq_restore(flags); 5425 5426 if (bpage) 5427 goto out; 5428 5429 page = alloc_pages_node(cpu_to_node(cpu), 5430 GFP_KERNEL | __GFP_NORETRY, 0); 5431 if (!page) 5432 return ERR_PTR(-ENOMEM); 5433 5434 bpage = page_address(page); 5435 5436 out: 5437 rb_init_page(bpage); 5438 5439 return bpage; 5440 } 5441 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 5442 5443 /** 5444 * ring_buffer_free_read_page - free an allocated read page 5445 * @buffer: the buffer the page was allocate for 5446 * @cpu: the cpu buffer the page came from 5447 * @data: the page to free 5448 * 5449 * Free a page allocated from ring_buffer_alloc_read_page. 5450 */ 5451 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) 5452 { 5453 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5454 struct buffer_data_page *bpage = data; 5455 struct page *page = virt_to_page(bpage); 5456 unsigned long flags; 5457 5458 /* If the page is still in use someplace else, we can't reuse it */ 5459 if (page_ref_count(page) > 1) 5460 goto out; 5461 5462 local_irq_save(flags); 5463 arch_spin_lock(&cpu_buffer->lock); 5464 5465 if (!cpu_buffer->free_page) { 5466 cpu_buffer->free_page = bpage; 5467 bpage = NULL; 5468 } 5469 5470 arch_spin_unlock(&cpu_buffer->lock); 5471 local_irq_restore(flags); 5472 5473 out: 5474 free_page((unsigned long)bpage); 5475 } 5476 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 5477 5478 /** 5479 * ring_buffer_read_page - extract a page from the ring buffer 5480 * @buffer: buffer to extract from 5481 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 5482 * @len: amount to extract 5483 * @cpu: the cpu of the buffer to extract 5484 * @full: should the extraction only happen when the page is full. 5485 * 5486 * This function will pull out a page from the ring buffer and consume it. 5487 * @data_page must be the address of the variable that was returned 5488 * from ring_buffer_alloc_read_page. This is because the page might be used 5489 * to swap with a page in the ring buffer. 5490 * 5491 * for example: 5492 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5493 * if (IS_ERR(rpage)) 5494 * return PTR_ERR(rpage); 5495 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5496 * if (ret >= 0) 5497 * process_page(rpage, ret); 5498 * 5499 * When @full is set, the function will not return true unless 5500 * the writer is off the reader page. 5501 * 5502 * Note: it is up to the calling functions to handle sleeps and wakeups. 5503 * The ring buffer can be used anywhere in the kernel and can not 5504 * blindly call wake_up. The layer that uses the ring buffer must be 5505 * responsible for that. 5506 * 5507 * Returns: 5508 * >=0 if data has been transferred, returns the offset of consumed data. 5509 * <0 if no data has been transferred. 
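 *
 * If events were lost while the page was being filled, the returned page's
 * commit field has the RB_MISSED_EVENTS bit set, and RB_MISSED_STORED is
 * also set when the count of lost events was stored at the end of the
 * page data.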
5510 */ 5511 int ring_buffer_read_page(struct trace_buffer *buffer, 5512 void **data_page, size_t len, int cpu, int full) 5513 { 5514 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5515 struct ring_buffer_event *event; 5516 struct buffer_data_page *bpage; 5517 struct buffer_page *reader; 5518 unsigned long missed_events; 5519 unsigned long flags; 5520 unsigned int commit; 5521 unsigned int read; 5522 u64 save_timestamp; 5523 int ret = -1; 5524 5525 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5526 goto out; 5527 5528 /* 5529 * If len is not big enough to hold the page header, then 5530 * we can not copy anything. 5531 */ 5532 if (len <= BUF_PAGE_HDR_SIZE) 5533 goto out; 5534 5535 len -= BUF_PAGE_HDR_SIZE; 5536 5537 if (!data_page) 5538 goto out; 5539 5540 bpage = *data_page; 5541 if (!bpage) 5542 goto out; 5543 5544 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5545 5546 reader = rb_get_reader_page(cpu_buffer); 5547 if (!reader) 5548 goto out_unlock; 5549 5550 event = rb_reader_event(cpu_buffer); 5551 5552 read = reader->read; 5553 commit = rb_page_commit(reader); 5554 5555 /* Check if any events were dropped */ 5556 missed_events = cpu_buffer->lost_events; 5557 5558 /* 5559 * If this page has been partially read or 5560 * if len is not big enough to read the rest of the page or 5561 * a writer is still on the page, then 5562 * we must copy the data from the page to the buffer. 5563 * Otherwise, we can simply swap the page with the one passed in. 5564 */ 5565 if (read || (len < (commit - read)) || 5566 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5567 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5568 unsigned int rpos = read; 5569 unsigned int pos = 0; 5570 unsigned int size; 5571 5572 if (full) 5573 goto out_unlock; 5574 5575 if (len > (commit - read)) 5576 len = (commit - read); 5577 5578 /* Always keep the time extend and data together */ 5579 size = rb_event_ts_length(event); 5580 5581 if (len < size) 5582 goto out_unlock; 5583 5584 /* save the current timestamp, since the user will need it */ 5585 save_timestamp = cpu_buffer->read_stamp; 5586 5587 /* Need to copy one event at a time */ 5588 do { 5589 /* We need the size of one event, because 5590 * rb_advance_reader only advances by one event, 5591 * whereas rb_event_ts_length may include the size of 5592 * one or two events. 5593 * We have already ensured there's enough space if this 5594 * is a time extend. 
*/ 5595 size = rb_event_length(event); 5596 memcpy(bpage->data + pos, rpage->data + rpos, size); 5597 5598 len -= size; 5599 5600 rb_advance_reader(cpu_buffer); 5601 rpos = reader->read; 5602 pos += size; 5603 5604 if (rpos >= commit) 5605 break; 5606 5607 event = rb_reader_event(cpu_buffer); 5608 /* Always keep the time extend and data together */ 5609 size = rb_event_ts_length(event); 5610 } while (len >= size); 5611 5612 /* update bpage */ 5613 local_set(&bpage->commit, pos); 5614 bpage->time_stamp = save_timestamp; 5615 5616 /* we copied everything to the beginning */ 5617 read = 0; 5618 } else { 5619 /* update the entry counter */ 5620 cpu_buffer->read += rb_page_entries(reader); 5621 cpu_buffer->read_bytes += BUF_PAGE_SIZE; 5622 5623 /* swap the pages */ 5624 rb_init_page(bpage); 5625 bpage = reader->page; 5626 reader->page = *data_page; 5627 local_set(&reader->write, 0); 5628 local_set(&reader->entries, 0); 5629 reader->read = 0; 5630 *data_page = bpage; 5631 5632 /* 5633 * Use the real_end for the data size, 5634 * This gives us a chance to store the lost events 5635 * on the page. 5636 */ 5637 if (reader->real_end) 5638 local_set(&bpage->commit, reader->real_end); 5639 } 5640 ret = read; 5641 5642 cpu_buffer->lost_events = 0; 5643 5644 commit = local_read(&bpage->commit); 5645 /* 5646 * Set a flag in the commit field if we lost events 5647 */ 5648 if (missed_events) { 5649 /* If there is room at the end of the page to save the 5650 * missed events, then record it there. 5651 */ 5652 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 5653 memcpy(&bpage->data[commit], &missed_events, 5654 sizeof(missed_events)); 5655 local_add(RB_MISSED_STORED, &bpage->commit); 5656 commit += sizeof(missed_events); 5657 } 5658 local_add(RB_MISSED_EVENTS, &bpage->commit); 5659 } 5660 5661 /* 5662 * This page may be off to user land. Zero it out here. 5663 */ 5664 if (commit < BUF_PAGE_SIZE) 5665 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 5666 5667 out_unlock: 5668 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5669 5670 out: 5671 return ret; 5672 } 5673 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 5674 5675 /* 5676 * We only allocate new buffers, never free them if the CPU goes down. 5677 * If we were to free the buffer, then the user would lose any trace that was in 5678 * the buffer. 5679 */ 5680 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) 5681 { 5682 struct trace_buffer *buffer; 5683 long nr_pages_same; 5684 int cpu_i; 5685 unsigned long nr_pages; 5686 5687 buffer = container_of(node, struct trace_buffer, node); 5688 if (cpumask_test_cpu(cpu, buffer->cpumask)) 5689 return 0; 5690 5691 nr_pages = 0; 5692 nr_pages_same = 1; 5693 /* check if all cpu sizes are same */ 5694 for_each_buffer_cpu(buffer, cpu_i) { 5695 /* fill in the size from first enabled cpu */ 5696 if (nr_pages == 0) 5697 nr_pages = buffer->buffers[cpu_i]->nr_pages; 5698 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 5699 nr_pages_same = 0; 5700 break; 5701 } 5702 } 5703 /* allocate minimum pages, user can later expand it */ 5704 if (!nr_pages_same) 5705 nr_pages = 2; 5706 buffer->buffers[cpu] = 5707 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 5708 if (!buffer->buffers[cpu]) { 5709 WARN(1, "failed to allocate ring buffer on CPU %u\n", 5710 cpu); 5711 return -ENOMEM; 5712 } 5713 smp_wmb(); 5714 cpumask_set_cpu(cpu, buffer->cpumask); 5715 return 0; 5716 } 5717 5718 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST 5719 /* 5720 * This is a basic integrity check of the ring buffer. 
5721 * Late in the boot cycle this test will run when configured in. 5722 * It will kick off a thread per CPU that will go into a loop 5723 * writing to the per cpu ring buffer various sizes of data. 5724 * Some of the data will be large items, some small. 5725 * 5726 * Another thread is created that goes into a spin, sending out 5727 * IPIs to the other CPUs to also write into the ring buffer. 5728 * this is to test the nesting ability of the buffer. 5729 * 5730 * Basic stats are recorded and reported. If something in the 5731 * ring buffer should happen that's not expected, a big warning 5732 * is displayed and all ring buffers are disabled. 5733 */ 5734 static struct task_struct *rb_threads[NR_CPUS] __initdata; 5735 5736 struct rb_test_data { 5737 struct trace_buffer *buffer; 5738 unsigned long events; 5739 unsigned long bytes_written; 5740 unsigned long bytes_alloc; 5741 unsigned long bytes_dropped; 5742 unsigned long events_nested; 5743 unsigned long bytes_written_nested; 5744 unsigned long bytes_alloc_nested; 5745 unsigned long bytes_dropped_nested; 5746 int min_size_nested; 5747 int max_size_nested; 5748 int max_size; 5749 int min_size; 5750 int cpu; 5751 int cnt; 5752 }; 5753 5754 static struct rb_test_data rb_data[NR_CPUS] __initdata; 5755 5756 /* 1 meg per cpu */ 5757 #define RB_TEST_BUFFER_SIZE 1048576 5758 5759 static char rb_string[] __initdata = 5760 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" 5761 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" 5762 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; 5763 5764 static bool rb_test_started __initdata; 5765 5766 struct rb_item { 5767 int size; 5768 char str[]; 5769 }; 5770 5771 static __init int rb_write_something(struct rb_test_data *data, bool nested) 5772 { 5773 struct ring_buffer_event *event; 5774 struct rb_item *item; 5775 bool started; 5776 int event_len; 5777 int size; 5778 int len; 5779 int cnt; 5780 5781 /* Have nested writes different that what is written */ 5782 cnt = data->cnt + (nested ? 27 : 0); 5783 5784 /* Multiply cnt by ~e, to make some unique increment */ 5785 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); 5786 5787 len = size + sizeof(struct rb_item); 5788 5789 started = rb_test_started; 5790 /* read rb_test_started before checking buffer enabled */ 5791 smp_rmb(); 5792 5793 event = ring_buffer_lock_reserve(data->buffer, len); 5794 if (!event) { 5795 /* Ignore dropped events before test starts. 
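 * (The buffer is created with recording off via ring_buffer_record_off()
 * and is only switched on in test_ringbuffer() just before rb_test_started
 * is set, so reserves that fail before then are expected.)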

#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct trace_buffer *buffer;
	unsigned long events;
	unsigned long bytes_written;
	unsigned long bytes_alloc;
	unsigned long bytes_dropped;
	unsigned long events_nested;
	unsigned long bytes_written_nested;
	unsigned long bytes_alloc_nested;
	unsigned long bytes_dropped_nested;
	int min_size_nested;
	int max_size_nested;
	int max_size;
	int min_size;
	int cpu;
	int cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};

static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes use different data than normal writes */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e, to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before the test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer, event);

	return 0;
}

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {

		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non-preempt, let others run */
		schedule();
	}

	return 0;
}

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct trace_buffer *buffer;
	int cpu;
	int ret = 0;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
		return 0;
	}

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable the buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
						 "rbtester/%d", cpu);
		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
			pr_cont("FAILED\n");
			ret = PTR_ERR(rb_threads[cpu]);
			goto out_free;
		}

		kthread_bind(rb_threads[cpu], cpu);
		wake_up_process(rb_threads[cpu]);
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(IS_ERR(rb_hammer))) {
		pr_cont("FAILED\n");
		ret = PTR_ERR(rb_hammer);
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show the buffer is enabled before setting rb_test_started.
	 * Yes, there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it.
	 * Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

 out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	/* Report! */
	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info(" events: %ld\n", total_events);
		pr_info(" dropped bytes: %ld\n", total_dropped);
		pr_info(" alloced bytes: %ld\n", total_alloc);
		pr_info(" written bytes: %ld\n", total_written);
		pr_info(" biggest event: %d\n", big_event_size);
		pr_info(" smallest event: %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected: %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info(" read events: %ld\n", total_read);
		pr_info(" lost events: %ld\n", total_lost);
		pr_info(" total events: %ld\n", total_lost + total_read);
		pr_info(" recorded len bytes: %ld\n", total_len);
		pr_info(" recorded size bytes: %ld\n", total_size);
		if (total_lost)
			pr_info(" With dropped events, record len and size may not match\n"
				" alloced and written from above\n");
		if (!total_lost) {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);

#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
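
/*
 * Usage note (illustrative): this self test is built only when
 * CONFIG_RING_BUFFER_STARTUP_TEST is enabled and runs once from a
 * late_initcall during boot.  A passing run prints the per-CPU counters
 * gathered above and ends with something like:
 *
 *	Running ring buffer tests...
 *	finished
 *	CPU 0:
 *	 events: ...
 *	 read events: ...
 *	...
 *	Ring buffer PASSED!
 *
 * Any unexpected state trips RB_WARN_ON(), which prints a warning and
 * disables the ring buffers.
 */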