// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
79 * 80 * +------+ 81 * |reader| RING BUFFER 82 * |page | 83 * +------+ +---+ +---+ +---+ 84 * | |-->| |-->| | 85 * +---+ +---+ +---+ 86 * ^ | 87 * | | 88 * +---------------+ 89 * 90 * 91 * +------+ 92 * |reader| RING BUFFER 93 * |page |------------------v 94 * +------+ +---+ +---+ +---+ 95 * | |-->| |-->| | 96 * +---+ +---+ +---+ 97 * ^ | 98 * | | 99 * +---------------+ 100 * 101 * 102 * +------+ 103 * |reader| RING BUFFER 104 * |page |------------------v 105 * +------+ +---+ +---+ +---+ 106 * ^ | |-->| |-->| | 107 * | +---+ +---+ +---+ 108 * | | 109 * | | 110 * +------------------------------+ 111 * 112 * 113 * +------+ 114 * |buffer| RING BUFFER 115 * |page |------------------v 116 * +------+ +---+ +---+ +---+ 117 * ^ | | | |-->| | 118 * | New +---+ +---+ +---+ 119 * | Reader------^ | 120 * | page | 121 * +------------------------------+ 122 * 123 * 124 * After we make this swap, the reader can hand this page off to the splice 125 * code and be done with it. It can even allocate a new page if it needs to 126 * and swap that into the ring buffer. 127 * 128 * We will be using cmpxchg soon to make all this lockless. 129 * 130 */ 131 132 /* Used for individual buffers (after the counter) */ 133 #define RB_BUFFER_OFF (1 << 20) 134 135 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 136 137 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 138 #define RB_ALIGNMENT 4U 139 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 140 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ 141 142 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS 143 # define RB_FORCE_8BYTE_ALIGNMENT 0 144 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT 145 #else 146 # define RB_FORCE_8BYTE_ALIGNMENT 1 147 # define RB_ARCH_ALIGNMENT 8U 148 #endif 149 150 #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) 151 152 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 153 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 154 155 enum { 156 RB_LEN_TIME_EXTEND = 8, 157 RB_LEN_TIME_STAMP = 8, 158 }; 159 160 #define skip_time_extend(event) \ 161 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) 162 163 #define extended_time(event) \ 164 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND) 165 166 static inline int rb_null_event(struct ring_buffer_event *event) 167 { 168 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; 169 } 170 171 static void rb_event_set_padding(struct ring_buffer_event *event) 172 { 173 /* padding has a NULL time_delta */ 174 event->type_len = RINGBUF_TYPE_PADDING; 175 event->time_delta = 0; 176 } 177 178 static unsigned 179 rb_event_data_length(struct ring_buffer_event *event) 180 { 181 unsigned length; 182 183 if (event->type_len) 184 length = event->type_len * RB_ALIGNMENT; 185 else 186 length = event->array[0]; 187 return length + RB_EVNT_HDR_SIZE; 188 } 189 190 /* 191 * Return the length of the given event. Will return 192 * the length of the time extend if the event is a 193 * time extend. 
194 */ 195 static inline unsigned 196 rb_event_length(struct ring_buffer_event *event) 197 { 198 switch (event->type_len) { 199 case RINGBUF_TYPE_PADDING: 200 if (rb_null_event(event)) 201 /* undefined */ 202 return -1; 203 return event->array[0] + RB_EVNT_HDR_SIZE; 204 205 case RINGBUF_TYPE_TIME_EXTEND: 206 return RB_LEN_TIME_EXTEND; 207 208 case RINGBUF_TYPE_TIME_STAMP: 209 return RB_LEN_TIME_STAMP; 210 211 case RINGBUF_TYPE_DATA: 212 return rb_event_data_length(event); 213 default: 214 WARN_ON_ONCE(1); 215 } 216 /* not hit */ 217 return 0; 218 } 219 220 /* 221 * Return total length of time extend and data, 222 * or just the event length for all other events. 223 */ 224 static inline unsigned 225 rb_event_ts_length(struct ring_buffer_event *event) 226 { 227 unsigned len = 0; 228 229 if (extended_time(event)) { 230 /* time extends include the data event after it */ 231 len = RB_LEN_TIME_EXTEND; 232 event = skip_time_extend(event); 233 } 234 return len + rb_event_length(event); 235 } 236 237 /** 238 * ring_buffer_event_length - return the length of the event 239 * @event: the event to get the length of 240 * 241 * Returns the size of the data load of a data event. 242 * If the event is something other than a data event, it 243 * returns the size of the event itself. With the exception 244 * of a TIME EXTEND, where it still returns the size of the 245 * data load of the data event after it. 246 */ 247 unsigned ring_buffer_event_length(struct ring_buffer_event *event) 248 { 249 unsigned length; 250 251 if (extended_time(event)) 252 event = skip_time_extend(event); 253 254 length = rb_event_length(event); 255 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 256 return length; 257 length -= RB_EVNT_HDR_SIZE; 258 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) 259 length -= sizeof(event->array[0]); 260 return length; 261 } 262 EXPORT_SYMBOL_GPL(ring_buffer_event_length); 263 264 /* inline for ring buffer fast paths */ 265 static __always_inline void * 266 rb_event_data(struct ring_buffer_event *event) 267 { 268 if (extended_time(event)) 269 event = skip_time_extend(event); 270 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 271 /* If length is in len field, then array[0] has the data */ 272 if (event->type_len) 273 return (void *)&event->array[0]; 274 /* Otherwise length is in array[0] and array[1] has the data */ 275 return (void *)&event->array[1]; 276 } 277 278 /** 279 * ring_buffer_event_data - return the data of the event 280 * @event: the event to get the data from 281 */ 282 void *ring_buffer_event_data(struct ring_buffer_event *event) 283 { 284 return rb_event_data(event); 285 } 286 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 287 288 #define for_each_buffer_cpu(buffer, cpu) \ 289 for_each_cpu(cpu, buffer->cpumask) 290 291 #define for_each_online_buffer_cpu(buffer, cpu) \ 292 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask) 293 294 #define TS_SHIFT 27 295 #define TS_MASK ((1ULL << TS_SHIFT) - 1) 296 #define TS_DELTA_TEST (~TS_MASK) 297 298 static u64 rb_event_time_stamp(struct ring_buffer_event *event) 299 { 300 u64 ts; 301 302 ts = event->array[0]; 303 ts <<= TS_SHIFT; 304 ts += event->time_delta; 305 306 return ts; 307 } 308 309 /* Flag when events were overwritten */ 310 #define RB_MISSED_EVENTS (1 << 31) 311 /* Missed count stored at end */ 312 #define RB_MISSED_STORED (1 << 30) 313 314 struct buffer_data_page { 315 u64 time_stamp; /* page time stamp */ 316 local_t commit; /* write committed index */ 317 unsigned char data[] RB_ALIGN_DATA; /* data 
of buffer page */ 318 }; 319 320 /* 321 * Note, the buffer_page list must be first. The buffer pages 322 * are allocated in cache lines, which means that each buffer 323 * page will be at the beginning of a cache line, and thus 324 * the least significant bits will be zero. We use this to 325 * add flags in the list struct pointers, to make the ring buffer 326 * lockless. 327 */ 328 struct buffer_page { 329 struct list_head list; /* list of buffer pages */ 330 local_t write; /* index for next write */ 331 unsigned read; /* index for next read */ 332 local_t entries; /* entries on this page */ 333 unsigned long real_end; /* real end of data */ 334 struct buffer_data_page *page; /* Actual data page */ 335 }; 336 337 /* 338 * The buffer page counters, write and entries, must be reset 339 * atomically when crossing page boundaries. To synchronize this 340 * update, two counters are inserted into the number. One is 341 * the actual counter for the write position or count on the page. 342 * 343 * The other is a counter of updaters. Before an update happens 344 * the update partition of the counter is incremented. This will 345 * allow the updater to update the counter atomically. 346 * 347 * The counter is 20 bits, and the state data is 12. 348 */ 349 #define RB_WRITE_MASK 0xfffff 350 #define RB_WRITE_INTCNT (1 << 20) 351 352 static void rb_init_page(struct buffer_data_page *bpage) 353 { 354 local_set(&bpage->commit, 0); 355 } 356 357 /* 358 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 359 * this issue out. 360 */ 361 static void free_buffer_page(struct buffer_page *bpage) 362 { 363 free_page((unsigned long)bpage->page); 364 kfree(bpage); 365 } 366 367 /* 368 * We need to fit the time_stamp delta into 27 bits. 369 */ 370 static inline int test_time_stamp(u64 delta) 371 { 372 if (delta & TS_DELTA_TEST) 373 return 1; 374 return 0; 375 } 376 377 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) 378 379 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ 380 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) 381 382 int ring_buffer_print_page_header(struct trace_seq *s) 383 { 384 struct buffer_data_page field; 385 386 trace_seq_printf(s, "\tfield: u64 timestamp;\t" 387 "offset:0;\tsize:%u;\tsigned:%u;\n", 388 (unsigned int)sizeof(field.time_stamp), 389 (unsigned int)is_signed_type(u64)); 390 391 trace_seq_printf(s, "\tfield: local_t commit;\t" 392 "offset:%u;\tsize:%u;\tsigned:%u;\n", 393 (unsigned int)offsetof(typeof(field), commit), 394 (unsigned int)sizeof(field.commit), 395 (unsigned int)is_signed_type(long)); 396 397 trace_seq_printf(s, "\tfield: int overwrite;\t" 398 "offset:%u;\tsize:%u;\tsigned:%u;\n", 399 (unsigned int)offsetof(typeof(field), commit), 400 1, 401 (unsigned int)is_signed_type(long)); 402 403 trace_seq_printf(s, "\tfield: char data;\t" 404 "offset:%u;\tsize:%u;\tsigned:%u;\n", 405 (unsigned int)offsetof(typeof(field), data), 406 (unsigned int)BUF_PAGE_SIZE, 407 (unsigned int)is_signed_type(char)); 408 409 return !trace_seq_has_overflowed(s); 410 } 411 412 struct rb_irq_work { 413 struct irq_work work; 414 wait_queue_head_t waiters; 415 wait_queue_head_t full_waiters; 416 long wait_index; 417 bool waiters_pending; 418 bool full_waiters_pending; 419 bool wakeup_full; 420 }; 421 422 /* 423 * Structure to hold event state and handle nested events. 
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

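/*
 * Illustrative sketch (not part of this file): a minimal userspace model of
 * the compressed event header printed by ring_buffer_print_entry_header()
 * above: 5 bits of type_len and 27 bits of time_delta packed into one 32 bit
 * word, with small payload lengths encoded as type_len * 4 bytes. The bit
 * order and all DEMO_* names are hypothetical; the kernel uses C bitfields.
 */
#if 0 /* example only, never compiled with the kernel */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TYPE_LEN_BITS	5
#define DEMO_TYPE_LEN_MAX	((1 << DEMO_TYPE_LEN_BITS) - 4)	/* 28, mirroring RINGBUF_TYPE_DATA_TYPE_LEN_MAX */
#define DEMO_DELTA_MASK		((1U << 27) - 1)
#define DEMO_ALIGNMENT		4U

static uint32_t demo_pack(uint32_t type_len, uint32_t time_delta)
{
	return (type_len & 0x1f) | ((time_delta & DEMO_DELTA_MASK) << DEMO_TYPE_LEN_BITS);
}

int main(void)
{
	/* A 24 byte payload is stored as type_len = 24 / 4 = 6 */
	uint32_t header = demo_pack(24 / DEMO_ALIGNMENT, 100);
	uint32_t type_len = header & 0x1f;
	uint32_t delta = header >> DEMO_TYPE_LEN_BITS;

	printf("type_len=%u -> %u byte payload, delta=%u\n",
	       type_len, type_len * DEMO_ALIGNMENT, delta);
	printf("largest payload encodable this way: %u bytes\n",
	       DEMO_TYPE_LEN_MAX * DEMO_ALIGNMENT);
	return 0;
}
#endif
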
#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 bit still uses local64_t).
 *
 * For the ring buffer, the required 64 bit operations on the
 * time stamp are the following:
 *
 *  - A read may fail if it interrupted a modification of the time stamp.
 *      It will succeed if it did not interrupt another write, even if
 *      the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up into a 30 bit top half and a 30 bit
 * bottom half (the bottom half being the least significant 30 bits of the
 * 60 bit time stamp).
 *
 * The two most significant bits of each half hold a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 * top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	 60
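/*
 * Illustrative sketch (not part of this file): a single-threaded userspace
 * model of the split time stamp described above. It packs a 60 bit value
 * into two 30 bit halves, tags each half with a 2 bit update counter, and
 * rejects a read whose halves carry mismatched counters. The DEMO_* names
 * are hypothetical, the separate msb word for bits above 59 is omitted, and
 * the kernel variant additionally has to cope with writers interrupting
 * readers via local_t operations.
 */
#if 0 /* example only, never compiled with the kernel */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define DEMO_SHIFT	30
#define DEMO_VAL_MASK	((1UL << DEMO_SHIFT) - 1)

struct demo_time {
	unsigned long cnt;	/* update counter */
	unsigned long top;	/* bits 30-59 plus 2 bit counter */
	unsigned long bottom;	/* bits 0-29  plus 2 bit counter */
};

static unsigned long demo_tag(unsigned long val, unsigned long cnt)
{
	return (val & DEMO_VAL_MASK) | ((cnt & 3) << DEMO_SHIFT);
}

static void demo_write(struct demo_time *t, uint64_t val)
{
	unsigned long cnt = ++t->cnt;

	t->top = demo_tag((unsigned long)(val >> DEMO_SHIFT), cnt);
	t->bottom = demo_tag((unsigned long)val, cnt);
}

static bool demo_read(struct demo_time *t, uint64_t *ret)
{
	unsigned long top = t->top, bottom = t->bottom;

	/* Mismatched counters mean the read raced with an update */
	if ((top >> DEMO_SHIFT) != (bottom >> DEMO_SHIFT))
		return false;

	*ret = ((uint64_t)(top & DEMO_VAL_MASK) << DEMO_SHIFT) |
	       (bottom & DEMO_VAL_MASK);
	return true;
}

int main(void)
{
	struct demo_time t = { 0 };
	uint64_t ts;

	demo_write(&t, 123456789012345ULL);
	if (demo_read(&t, &ts))
		printf("read back %llu\n", (unsigned long long)ts);
	return 0;
}
#endif
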
static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}

static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret;

	ret = local_cmpxchg(l, expect, set);
	return ret == expect;
}

static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	u64 val;
	val = local64_cmpxchg(&t->time, expect, set);
	return val == expect;
}
#endif
/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

static inline u64 rb_time_stamp(struct trace_buffer *buffer);
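/*
 * Illustrative sketch (not part of this file): a worked example of the
 * rb_fix_abs_ts() logic above. Absolute time stamps recorded in events
 * carry only the low 59 bits, so the 5 MSBs of a previously saved full
 * time stamp are OR'd back in, and a wrap past the 59 bit boundary is
 * detected by the result being smaller than the saved stamp. All values
 * and DEMO_* names below are made up for demonstration.
 */
#if 0 /* example only, never compiled with the kernel */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TS_MSB	(0xf8ULL << 56)		/* the 5 most significant bits */

static uint64_t demo_fix_abs_ts(uint64_t abs, uint64_t save_ts)
{
	if (save_ts & DEMO_TS_MSB) {
		abs |= save_ts & DEMO_TS_MSB;
		if (abs < save_ts)		/* crossed a 2^59 boundary */
			abs += 1ULL << 59;
	}
	return abs;
}

int main(void)
{
	uint64_t save = (3ULL << 59) | 1000;	/* full stamp with MSBs set */
	uint64_t ev1 = 2000;			/* later event, low 59 bits only */
	uint64_t ev2 = 10;			/* event recorded after a 2^59 wrap */

	printf("%llx\n", (unsigned long long)demo_fix_abs_ts(ev1, save));
	printf("%llx\n", (unsigned long long)demo_fix_abs_ts(ev2, save));
	return 0;
}
#endif
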
/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned;
 * otherwise the current time is returned. Neither of these last two
 * cases should ever really happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on (or RING_BUFFER_ALL_CPUS)
 *
 * When a file that represents a ring buffer is being closed,
 * it is prudent to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
1049 * 1050 * There's no reason to protect this flag either, as 1051 * the work queue and irq_work logic will do the necessary 1052 * synchronization for the wake ups. The only thing 1053 * that is necessary is that the wake up happens after 1054 * a task has been queued. It's OK for spurious wake ups. 1055 */ 1056 if (full) 1057 work->full_waiters_pending = true; 1058 else 1059 work->waiters_pending = true; 1060 1061 if (signal_pending(current)) { 1062 ret = -EINTR; 1063 break; 1064 } 1065 1066 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) 1067 break; 1068 1069 if (cpu != RING_BUFFER_ALL_CPUS && 1070 !ring_buffer_empty_cpu(buffer, cpu)) { 1071 unsigned long flags; 1072 bool pagebusy; 1073 bool done; 1074 1075 if (!full) 1076 break; 1077 1078 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 1079 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; 1080 done = !pagebusy && full_hit(buffer, cpu, full); 1081 1082 if (!cpu_buffer->shortest_full || 1083 cpu_buffer->shortest_full > full) 1084 cpu_buffer->shortest_full = full; 1085 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1086 if (done) 1087 break; 1088 } 1089 1090 schedule(); 1091 1092 /* Make sure to see the new wait index */ 1093 smp_rmb(); 1094 if (wait_index != work->wait_index) 1095 break; 1096 } 1097 1098 if (full) 1099 finish_wait(&work->full_waiters, &wait); 1100 else 1101 finish_wait(&work->waiters, &wait); 1102 1103 return ret; 1104 } 1105 1106 /** 1107 * ring_buffer_poll_wait - poll on buffer input 1108 * @buffer: buffer to wait on 1109 * @cpu: the cpu buffer to wait on 1110 * @filp: the file descriptor 1111 * @poll_table: The poll descriptor 1112 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS 1113 * 1114 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 1115 * as data is added to any of the @buffer's cpu buffers. Otherwise 1116 * it will wait for data to be added to a specific cpu buffer. 1117 * 1118 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers, 1119 * zero otherwise. 1120 */ 1121 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, 1122 struct file *filp, poll_table *poll_table, int full) 1123 { 1124 struct ring_buffer_per_cpu *cpu_buffer; 1125 struct rb_irq_work *work; 1126 1127 if (cpu == RING_BUFFER_ALL_CPUS) { 1128 work = &buffer->irq_work; 1129 full = 0; 1130 } else { 1131 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1132 return -EINVAL; 1133 1134 cpu_buffer = buffer->buffers[cpu]; 1135 work = &cpu_buffer->irq_work; 1136 } 1137 1138 if (full) { 1139 poll_wait(filp, &work->full_waiters, poll_table); 1140 work->full_waiters_pending = true; 1141 } else { 1142 poll_wait(filp, &work->waiters, poll_table); 1143 work->waiters_pending = true; 1144 } 1145 1146 /* 1147 * There's a tight race between setting the waiters_pending and 1148 * checking if the ring buffer is empty. Once the waiters_pending bit 1149 * is set, the next event will wake the task up, but we can get stuck 1150 * if there's only a single event in. 1151 * 1152 * FIXME: Ideally, we need a memory barrier on the writer side as well, 1153 * but adding a memory barrier to all events will cause too much of a 1154 * performance hit in the fast path. We only need a memory barrier when 1155 * the buffer goes from empty to having content. But as this race is 1156 * extremely small, and it's not a problem if another event comes in, we 1157 * will fix it later. 
 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts, but reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next	bit 1	  bit 0
 * -------			-------
 * Normal page			0	  0
 * Points to head page		0	  1
 * New head page		1	  0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}
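/*
 * Illustrative sketch (not part of this file): how the two low bits of an
 * aligned list pointer can carry the HEAD/UPDATE style flags described
 * above and be masked off again, as rb_list_head() does. The DEMO_* names
 * are hypothetical and the "pages" are plain malloc'd nodes, relying only
 * on malloc returning at least 4 byte aligned pointers.
 */
#if 0 /* example only, never compiled with the kernel */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_FLAG_MASK	3UL
#define DEMO_PAGE_HEAD	1UL

struct demo_page {
	struct demo_page *next;	/* low two bits may hold flags */
};

static struct demo_page *demo_strip(struct demo_page *p)
{
	return (struct demo_page *)((uintptr_t)p & ~DEMO_FLAG_MASK);
}

int main(void)
{
	struct demo_page *a = malloc(sizeof(*a));
	struct demo_page *b = malloc(sizeof(*b));

	/* Point a->next at b and tag it as the head page */
	a->next = (struct demo_page *)((uintptr_t)b | DEMO_PAGE_HEAD);

	printf("flags: %lu\n", (unsigned long)((uintptr_t)a->next & DEMO_FLAG_MASK));
	printf("same page: %d\n", demo_strip(a->next) == b);

	free(a);
	free(b);
	return 0;
}
#endif
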
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
1372 */ 1373 rb_set_list_to_head(head->list.prev); 1374 } 1375 1376 static void rb_list_head_clear(struct list_head *list) 1377 { 1378 unsigned long *ptr = (unsigned long *)&list->next; 1379 1380 *ptr &= ~RB_FLAG_MASK; 1381 } 1382 1383 /* 1384 * rb_head_page_deactivate - clears head page ptr (for free list) 1385 */ 1386 static void 1387 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) 1388 { 1389 struct list_head *hd; 1390 1391 /* Go through the whole list and clear any pointers found. */ 1392 rb_list_head_clear(cpu_buffer->pages); 1393 1394 list_for_each(hd, cpu_buffer->pages) 1395 rb_list_head_clear(hd); 1396 } 1397 1398 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, 1399 struct buffer_page *head, 1400 struct buffer_page *prev, 1401 int old_flag, int new_flag) 1402 { 1403 struct list_head *list; 1404 unsigned long val = (unsigned long)&head->list; 1405 unsigned long ret; 1406 1407 list = &prev->list; 1408 1409 val &= ~RB_FLAG_MASK; 1410 1411 ret = cmpxchg((unsigned long *)&list->next, 1412 val | old_flag, val | new_flag); 1413 1414 /* check if the reader took the page */ 1415 if ((ret & ~RB_FLAG_MASK) != val) 1416 return RB_PAGE_MOVED; 1417 1418 return ret & RB_FLAG_MASK; 1419 } 1420 1421 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, 1422 struct buffer_page *head, 1423 struct buffer_page *prev, 1424 int old_flag) 1425 { 1426 return rb_head_page_set(cpu_buffer, head, prev, 1427 old_flag, RB_PAGE_UPDATE); 1428 } 1429 1430 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, 1431 struct buffer_page *head, 1432 struct buffer_page *prev, 1433 int old_flag) 1434 { 1435 return rb_head_page_set(cpu_buffer, head, prev, 1436 old_flag, RB_PAGE_HEAD); 1437 } 1438 1439 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, 1440 struct buffer_page *head, 1441 struct buffer_page *prev, 1442 int old_flag) 1443 { 1444 return rb_head_page_set(cpu_buffer, head, prev, 1445 old_flag, RB_PAGE_NORMAL); 1446 } 1447 1448 static inline void rb_inc_page(struct buffer_page **bpage) 1449 { 1450 struct list_head *p = rb_list_head((*bpage)->list.next); 1451 1452 *bpage = list_entry(p, struct buffer_page, list); 1453 } 1454 1455 static struct buffer_page * 1456 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) 1457 { 1458 struct buffer_page *head; 1459 struct buffer_page *page; 1460 struct list_head *list; 1461 int i; 1462 1463 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) 1464 return NULL; 1465 1466 /* sanity check */ 1467 list = cpu_buffer->pages; 1468 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) 1469 return NULL; 1470 1471 page = head = cpu_buffer->head_page; 1472 /* 1473 * It is possible that the writer moves the header behind 1474 * where we started, and we miss in one loop. 1475 * A second loop should grab the header, but we'll do 1476 * three loops just because I'm paranoid. 
1477 */ 1478 for (i = 0; i < 3; i++) { 1479 do { 1480 if (rb_is_head_page(page, page->list.prev)) { 1481 cpu_buffer->head_page = page; 1482 return page; 1483 } 1484 rb_inc_page(&page); 1485 } while (page != head); 1486 } 1487 1488 RB_WARN_ON(cpu_buffer, 1); 1489 1490 return NULL; 1491 } 1492 1493 static int rb_head_page_replace(struct buffer_page *old, 1494 struct buffer_page *new) 1495 { 1496 unsigned long *ptr = (unsigned long *)&old->list.prev->next; 1497 unsigned long val; 1498 unsigned long ret; 1499 1500 val = *ptr & ~RB_FLAG_MASK; 1501 val |= RB_PAGE_HEAD; 1502 1503 ret = cmpxchg(ptr, val, (unsigned long)&new->list); 1504 1505 return ret == val; 1506 } 1507 1508 /* 1509 * rb_tail_page_update - move the tail page forward 1510 */ 1511 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, 1512 struct buffer_page *tail_page, 1513 struct buffer_page *next_page) 1514 { 1515 unsigned long old_entries; 1516 unsigned long old_write; 1517 1518 /* 1519 * The tail page now needs to be moved forward. 1520 * 1521 * We need to reset the tail page, but without messing 1522 * with possible erasing of data brought in by interrupts 1523 * that have moved the tail page and are currently on it. 1524 * 1525 * We add a counter to the write field to denote this. 1526 */ 1527 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); 1528 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); 1529 1530 local_inc(&cpu_buffer->pages_touched); 1531 /* 1532 * Just make sure we have seen our old_write and synchronize 1533 * with any interrupts that come in. 1534 */ 1535 barrier(); 1536 1537 /* 1538 * If the tail page is still the same as what we think 1539 * it is, then it is up to us to update the tail 1540 * pointer. 1541 */ 1542 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { 1543 /* Zero the write counter */ 1544 unsigned long val = old_write & ~RB_WRITE_MASK; 1545 unsigned long eval = old_entries & ~RB_WRITE_MASK; 1546 1547 /* 1548 * This will only succeed if an interrupt did 1549 * not come in and change it. In which case, we 1550 * do not want to modify it. 1551 * 1552 * We add (void) to let the compiler know that we do not care 1553 * about the return value of these functions. We use the 1554 * cmpxchg to only update if an interrupt did not already 1555 * do it for us. If the cmpxchg fails, we don't care. 1556 */ 1557 (void)local_cmpxchg(&next_page->write, old_write, val); 1558 (void)local_cmpxchg(&next_page->entries, old_entries, eval); 1559 1560 /* 1561 * No need to worry about races with clearing out the commit. 1562 * it only can increment when a commit takes place. But that 1563 * only happens in the outer most nested commit. 
1564 */ 1565 local_set(&next_page->page->commit, 0); 1566 1567 /* Again, either we update tail_page or an interrupt does */ 1568 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); 1569 } 1570 } 1571 1572 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, 1573 struct buffer_page *bpage) 1574 { 1575 unsigned long val = (unsigned long)bpage; 1576 1577 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) 1578 return 1; 1579 1580 return 0; 1581 } 1582 1583 /** 1584 * rb_check_list - make sure a pointer to a list has the last bits zero 1585 */ 1586 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, 1587 struct list_head *list) 1588 { 1589 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) 1590 return 1; 1591 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) 1592 return 1; 1593 return 0; 1594 } 1595 1596 /** 1597 * rb_check_pages - integrity check of buffer pages 1598 * @cpu_buffer: CPU buffer with pages to test 1599 * 1600 * As a safety measure we check to make sure the data pages have not 1601 * been corrupted. 1602 */ 1603 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 1604 { 1605 struct list_head *head = cpu_buffer->pages; 1606 struct buffer_page *bpage, *tmp; 1607 1608 /* Reset the head page if it exists */ 1609 if (cpu_buffer->head_page) 1610 rb_set_head_page(cpu_buffer); 1611 1612 rb_head_page_deactivate(cpu_buffer); 1613 1614 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 1615 return -1; 1616 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 1617 return -1; 1618 1619 if (rb_check_list(cpu_buffer, head)) 1620 return -1; 1621 1622 list_for_each_entry_safe(bpage, tmp, head, list) { 1623 if (RB_WARN_ON(cpu_buffer, 1624 bpage->list.next->prev != &bpage->list)) 1625 return -1; 1626 if (RB_WARN_ON(cpu_buffer, 1627 bpage->list.prev->next != &bpage->list)) 1628 return -1; 1629 if (rb_check_list(cpu_buffer, &bpage->list)) 1630 return -1; 1631 } 1632 1633 rb_head_page_activate(cpu_buffer); 1634 1635 return 0; 1636 } 1637 1638 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1639 long nr_pages, struct list_head *pages) 1640 { 1641 struct buffer_page *bpage, *tmp; 1642 bool user_thread = current->mm != NULL; 1643 gfp_t mflags; 1644 long i; 1645 1646 /* 1647 * Check if the available memory is there first. 1648 * Note, si_mem_available() only gives us a rough estimate of available 1649 * memory. It may not be accurate. But we don't care, we just want 1650 * to prevent doing any allocation when it is obvious that it is 1651 * not going to succeed. 1652 */ 1653 i = si_mem_available(); 1654 if (i < nr_pages) 1655 return -ENOMEM; 1656 1657 /* 1658 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails 1659 * gracefully without invoking oom-killer and the system is not 1660 * destabilized. 1661 */ 1662 mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL; 1663 1664 /* 1665 * If a user thread allocates too much, and si_mem_available() 1666 * reports there's enough memory, even though there is not. 1667 * Make sure the OOM killer kills this thread. This can happen 1668 * even with RETRY_MAYFAIL because another task may be doing 1669 * an allocation after this task has taken all memory. 1670 * This is the task the OOM killer needs to take out during this 1671 * loop, even if it was triggered by an allocation somewhere else. 
1672 */ 1673 if (user_thread) 1674 set_current_oom_origin(); 1675 for (i = 0; i < nr_pages; i++) { 1676 struct page *page; 1677 1678 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1679 mflags, cpu_to_node(cpu_buffer->cpu)); 1680 if (!bpage) 1681 goto free_pages; 1682 1683 rb_check_bpage(cpu_buffer, bpage); 1684 1685 list_add(&bpage->list, pages); 1686 1687 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); 1688 if (!page) 1689 goto free_pages; 1690 bpage->page = page_address(page); 1691 rb_init_page(bpage->page); 1692 1693 if (user_thread && fatal_signal_pending(current)) 1694 goto free_pages; 1695 } 1696 if (user_thread) 1697 clear_current_oom_origin(); 1698 1699 return 0; 1700 1701 free_pages: 1702 list_for_each_entry_safe(bpage, tmp, pages, list) { 1703 list_del_init(&bpage->list); 1704 free_buffer_page(bpage); 1705 } 1706 if (user_thread) 1707 clear_current_oom_origin(); 1708 1709 return -ENOMEM; 1710 } 1711 1712 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1713 unsigned long nr_pages) 1714 { 1715 LIST_HEAD(pages); 1716 1717 WARN_ON(!nr_pages); 1718 1719 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) 1720 return -ENOMEM; 1721 1722 /* 1723 * The ring buffer page list is a circular list that does not 1724 * start and end with a list head. All page list items point to 1725 * other pages. 1726 */ 1727 cpu_buffer->pages = pages.next; 1728 list_del(&pages); 1729 1730 cpu_buffer->nr_pages = nr_pages; 1731 1732 rb_check_pages(cpu_buffer); 1733 1734 return 0; 1735 } 1736 1737 static struct ring_buffer_per_cpu * 1738 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) 1739 { 1740 struct ring_buffer_per_cpu *cpu_buffer; 1741 struct buffer_page *bpage; 1742 struct page *page; 1743 int ret; 1744 1745 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 1746 GFP_KERNEL, cpu_to_node(cpu)); 1747 if (!cpu_buffer) 1748 return NULL; 1749 1750 cpu_buffer->cpu = cpu; 1751 cpu_buffer->buffer = buffer; 1752 raw_spin_lock_init(&cpu_buffer->reader_lock); 1753 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1754 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1755 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 1756 init_completion(&cpu_buffer->update_done); 1757 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); 1758 init_waitqueue_head(&cpu_buffer->irq_work.waiters); 1759 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); 1760 1761 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1762 GFP_KERNEL, cpu_to_node(cpu)); 1763 if (!bpage) 1764 goto fail_free_buffer; 1765 1766 rb_check_bpage(cpu_buffer, bpage); 1767 1768 cpu_buffer->reader_page = bpage; 1769 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); 1770 if (!page) 1771 goto fail_free_reader; 1772 bpage->page = page_address(page); 1773 rb_init_page(bpage->page); 1774 1775 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 1776 INIT_LIST_HEAD(&cpu_buffer->new_pages); 1777 1778 ret = rb_allocate_pages(cpu_buffer, nr_pages); 1779 if (ret < 0) 1780 goto fail_free_reader; 1781 1782 cpu_buffer->head_page 1783 = list_entry(cpu_buffer->pages, struct buffer_page, list); 1784 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 1785 1786 rb_head_page_activate(cpu_buffer); 1787 1788 return cpu_buffer; 1789 1790 fail_free_reader: 1791 free_buffer_page(cpu_buffer->reader_page); 1792 1793 fail_free_buffer: 1794 kfree(cpu_buffer); 1795 return NULL; 1796 } 1797 
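/*
 * Illustrative sketch (not part of this file): the "circular list with no
 * list head" shape that rb_allocate_pages() leaves behind. Nodes are first
 * strung on a temporary head, then the head is unlinked so every remaining
 * node points only at other nodes. Simplified userspace types with
 * hypothetical demo_* names stand in for struct list_head and the buffer
 * pages; the short demo intentionally does not free its nodes.
 */
#if 0 /* example only, never compiled with the kernel */
#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	struct demo_node *next;
	struct demo_node *prev;
	int id;
};

static void demo_link_before(struct demo_node *new, struct demo_node *at)
{
	new->prev = at->prev;
	new->next = at;
	at->prev->next = new;
	at->prev = new;
}

int main(void)
{
	struct demo_node head = { &head, &head, -1 };
	struct demo_node *first, *p;
	int i;

	for (i = 0; i < 4; i++) {
		struct demo_node *n = malloc(sizeof(*n));

		n->id = i;
		demo_link_before(n, &head);	/* like list_add_tail() */
	}

	/* Detach the temporary head: the four nodes now form a pure circle */
	first = head.next;
	head.prev->next = head.next;
	head.next->prev = head.prev;

	p = first;
	do {
		printf("page %d\n", p->id);
		p = p->next;
	} while (p != first);
	return 0;
}
#endif
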
1798 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 1799 { 1800 struct list_head *head = cpu_buffer->pages; 1801 struct buffer_page *bpage, *tmp; 1802 1803 free_buffer_page(cpu_buffer->reader_page); 1804 1805 if (head) { 1806 rb_head_page_deactivate(cpu_buffer); 1807 1808 list_for_each_entry_safe(bpage, tmp, head, list) { 1809 list_del_init(&bpage->list); 1810 free_buffer_page(bpage); 1811 } 1812 bpage = list_entry(head, struct buffer_page, list); 1813 free_buffer_page(bpage); 1814 } 1815 1816 kfree(cpu_buffer); 1817 } 1818 1819 /** 1820 * __ring_buffer_alloc - allocate a new ring_buffer 1821 * @size: the size in bytes per cpu that is needed. 1822 * @flags: attributes to set for the ring buffer. 1823 * @key: ring buffer reader_lock_key. 1824 * 1825 * Currently the only flag that is available is the RB_FL_OVERWRITE 1826 * flag. This flag means that the buffer will overwrite old data 1827 * when the buffer wraps. If this flag is not set, the buffer will 1828 * drop data when the tail hits the head. 1829 */ 1830 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 1831 struct lock_class_key *key) 1832 { 1833 struct trace_buffer *buffer; 1834 long nr_pages; 1835 int bsize; 1836 int cpu; 1837 int ret; 1838 1839 /* keep it in its own cache line */ 1840 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1841 GFP_KERNEL); 1842 if (!buffer) 1843 return NULL; 1844 1845 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 1846 goto fail_free_buffer; 1847 1848 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1849 buffer->flags = flags; 1850 buffer->clock = trace_clock_local; 1851 buffer->reader_lock_key = key; 1852 1853 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); 1854 init_waitqueue_head(&buffer->irq_work.waiters); 1855 1856 /* need at least two pages */ 1857 if (nr_pages < 2) 1858 nr_pages = 2; 1859 1860 buffer->cpus = nr_cpu_ids; 1861 1862 bsize = sizeof(void *) * nr_cpu_ids; 1863 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 1864 GFP_KERNEL); 1865 if (!buffer->buffers) 1866 goto fail_free_cpumask; 1867 1868 cpu = raw_smp_processor_id(); 1869 cpumask_set_cpu(cpu, buffer->cpumask); 1870 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 1871 if (!buffer->buffers[cpu]) 1872 goto fail_free_buffers; 1873 1874 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); 1875 if (ret < 0) 1876 goto fail_free_buffers; 1877 1878 mutex_init(&buffer->mutex); 1879 1880 return buffer; 1881 1882 fail_free_buffers: 1883 for_each_buffer_cpu(buffer, cpu) { 1884 if (buffer->buffers[cpu]) 1885 rb_free_cpu_buffer(buffer->buffers[cpu]); 1886 } 1887 kfree(buffer->buffers); 1888 1889 fail_free_cpumask: 1890 free_cpumask_var(buffer->cpumask); 1891 1892 fail_free_buffer: 1893 kfree(buffer); 1894 return NULL; 1895 } 1896 EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 1897 1898 /** 1899 * ring_buffer_free - free a ring buffer. 1900 * @buffer: the buffer to free. 
1901 */ 1902 void 1903 ring_buffer_free(struct trace_buffer *buffer) 1904 { 1905 int cpu; 1906 1907 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); 1908 1909 for_each_buffer_cpu(buffer, cpu) 1910 rb_free_cpu_buffer(buffer->buffers[cpu]); 1911 1912 kfree(buffer->buffers); 1913 free_cpumask_var(buffer->cpumask); 1914 1915 kfree(buffer); 1916 } 1917 EXPORT_SYMBOL_GPL(ring_buffer_free); 1918 1919 void ring_buffer_set_clock(struct trace_buffer *buffer, 1920 u64 (*clock)(void)) 1921 { 1922 buffer->clock = clock; 1923 } 1924 1925 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) 1926 { 1927 buffer->time_stamp_abs = abs; 1928 } 1929 1930 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) 1931 { 1932 return buffer->time_stamp_abs; 1933 } 1934 1935 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 1936 1937 static inline unsigned long rb_page_entries(struct buffer_page *bpage) 1938 { 1939 return local_read(&bpage->entries) & RB_WRITE_MASK; 1940 } 1941 1942 static inline unsigned long rb_page_write(struct buffer_page *bpage) 1943 { 1944 return local_read(&bpage->write) & RB_WRITE_MASK; 1945 } 1946 1947 static int 1948 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) 1949 { 1950 struct list_head *tail_page, *to_remove, *next_page; 1951 struct buffer_page *to_remove_page, *tmp_iter_page; 1952 struct buffer_page *last_page, *first_page; 1953 unsigned long nr_removed; 1954 unsigned long head_bit; 1955 int page_entries; 1956 1957 head_bit = 0; 1958 1959 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1960 atomic_inc(&cpu_buffer->record_disabled); 1961 /* 1962 * We don't race with the readers since we have acquired the reader 1963 * lock. We also don't race with writers after disabling recording. 1964 * This makes it easy to figure out the first and the last page to be 1965 * removed from the list. We unlink all the pages in between including 1966 * the first and last pages. This is done in a busy loop so that we 1967 * lose the least number of traces. 1968 * The pages are freed after we restart recording and unlock readers. 1969 */ 1970 tail_page = &cpu_buffer->tail_page->list; 1971 1972 /* 1973 * tail page might be on reader page, we remove the next page 1974 * from the ring buffer 1975 */ 1976 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 1977 tail_page = rb_list_head(tail_page->next); 1978 to_remove = tail_page; 1979 1980 /* start of pages to remove */ 1981 first_page = list_entry(rb_list_head(to_remove->next), 1982 struct buffer_page, list); 1983 1984 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 1985 to_remove = rb_list_head(to_remove)->next; 1986 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 1987 } 1988 1989 next_page = rb_list_head(to_remove)->next; 1990 1991 /* 1992 * Now we remove all pages between tail_page and next_page. 
1993 * Make sure that we have the head_bit value preserved for the 1994 * next page 1995 */ 1996 tail_page->next = (struct list_head *)((unsigned long)next_page | 1997 head_bit); 1998 next_page = rb_list_head(next_page); 1999 next_page->prev = tail_page; 2000 2001 /* make sure pages points to a valid page in the ring buffer */ 2002 cpu_buffer->pages = next_page; 2003 2004 /* update head page */ 2005 if (head_bit) 2006 cpu_buffer->head_page = list_entry(next_page, 2007 struct buffer_page, list); 2008 2009 /* 2010 * change read pointer to make sure any read iterators reset 2011 * themselves 2012 */ 2013 cpu_buffer->read = 0; 2014 2015 /* pages are removed, resume tracing and then free the pages */ 2016 atomic_dec(&cpu_buffer->record_disabled); 2017 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 2018 2019 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 2020 2021 /* last buffer page to remove */ 2022 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 2023 list); 2024 tmp_iter_page = first_page; 2025 2026 do { 2027 cond_resched(); 2028 2029 to_remove_page = tmp_iter_page; 2030 rb_inc_page(&tmp_iter_page); 2031 2032 /* update the counters */ 2033 page_entries = rb_page_entries(to_remove_page); 2034 if (page_entries) { 2035 /* 2036 * If something was added to this page, it was full 2037 * since it is not the tail page. So we deduct the 2038 * bytes consumed in the ring buffer from here. 2039 * Increment overrun to account for the lost events. 2040 */ 2041 local_add(page_entries, &cpu_buffer->overrun); 2042 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2043 local_inc(&cpu_buffer->pages_lost); 2044 } 2045 2046 /* 2047 * We have already removed references to this list item, just 2048 * free up the buffer_page and its page 2049 */ 2050 free_buffer_page(to_remove_page); 2051 nr_removed--; 2052 2053 } while (to_remove_page != last_page); 2054 2055 RB_WARN_ON(cpu_buffer, nr_removed); 2056 2057 return nr_removed == 0; 2058 } 2059 2060 static int 2061 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 2062 { 2063 struct list_head *pages = &cpu_buffer->new_pages; 2064 int retries, success; 2065 unsigned long flags; 2066 2067 /* Can be called at early boot up, where interrupts must not be enabled */ 2068 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2069 /* 2070 * We are holding the reader lock, so the reader page won't be swapped 2071 * in the ring buffer. Now we are racing with the writer trying to 2072 * move the head page and the tail page. 2073 * We are going to adapt the reader page update process where: 2074 * 1. We first splice the start and end of list of new pages between 2075 * the head page and its previous page. 2076 * 2. We cmpxchg the prev_page->next to point from head page to the 2077 * start of new pages list. 2078 * 3. Finally, we update the head->prev to the end of new list. 2079 * 2080 * We will try this process 10 times, to make sure that we don't keep 2081 * spinning.
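 *
 * Conceptually, ignoring the RB_PAGE_HEAD flag that is folded into the
 * pointer below, each attempt of the splice looks like this
 * (illustrative sketch only):
 *
 *	last_page->next  = head_page;
 *	first_page->prev = prev_page;
 *	if (cmpxchg(&prev_page->next, head_page, first_page) == head_page)
 *		head_page->prev = last_page;	(splice is now visible)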
2082 */ 2083 retries = 10; 2084 success = 0; 2085 while (retries--) { 2086 struct list_head *head_page, *prev_page, *r; 2087 struct list_head *last_page, *first_page; 2088 struct list_head *head_page_with_bit; 2089 2090 head_page = &rb_set_head_page(cpu_buffer)->list; 2091 if (!head_page) 2092 break; 2093 prev_page = head_page->prev; 2094 2095 first_page = pages->next; 2096 last_page = pages->prev; 2097 2098 head_page_with_bit = (struct list_head *) 2099 ((unsigned long)head_page | RB_PAGE_HEAD); 2100 2101 last_page->next = head_page_with_bit; 2102 first_page->prev = prev_page; 2103 2104 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 2105 2106 if (r == head_page_with_bit) { 2107 /* 2108 * yay, we replaced the page pointer to our new list, 2109 * now, we just have to update to head page's prev 2110 * pointer to point to end of list 2111 */ 2112 head_page->prev = last_page; 2113 success = 1; 2114 break; 2115 } 2116 } 2117 2118 if (success) 2119 INIT_LIST_HEAD(pages); 2120 /* 2121 * If we weren't successful in adding in new pages, warn and stop 2122 * tracing 2123 */ 2124 RB_WARN_ON(cpu_buffer, !success); 2125 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2126 2127 /* free pages if they weren't inserted */ 2128 if (!success) { 2129 struct buffer_page *bpage, *tmp; 2130 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2131 list) { 2132 list_del_init(&bpage->list); 2133 free_buffer_page(bpage); 2134 } 2135 } 2136 return success; 2137 } 2138 2139 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2140 { 2141 int success; 2142 2143 if (cpu_buffer->nr_pages_to_update > 0) 2144 success = rb_insert_pages(cpu_buffer); 2145 else 2146 success = rb_remove_pages(cpu_buffer, 2147 -cpu_buffer->nr_pages_to_update); 2148 2149 if (success) 2150 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2151 } 2152 2153 static void update_pages_handler(struct work_struct *work) 2154 { 2155 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2156 struct ring_buffer_per_cpu, update_pages_work); 2157 rb_update_pages(cpu_buffer); 2158 complete(&cpu_buffer->update_done); 2159 } 2160 2161 /** 2162 * ring_buffer_resize - resize the ring buffer 2163 * @buffer: the buffer to resize. 2164 * @size: the new size. 2165 * @cpu_id: the cpu buffer to resize 2166 * 2167 * Minimum size is 2 * BUF_PAGE_SIZE. 2168 * 2169 * Returns 0 on success and < 0 on failure. 2170 */ 2171 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2172 int cpu_id) 2173 { 2174 struct ring_buffer_per_cpu *cpu_buffer; 2175 unsigned long nr_pages; 2176 int cpu, err; 2177 2178 /* 2179 * Always succeed at resizing a non-existent buffer: 2180 */ 2181 if (!buffer) 2182 return 0; 2183 2184 /* Make sure the requested buffer exists */ 2185 if (cpu_id != RING_BUFFER_ALL_CPUS && 2186 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2187 return 0; 2188 2189 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 2190 2191 /* we need a minimum of two pages */ 2192 if (nr_pages < 2) 2193 nr_pages = 2; 2194 2195 /* prevent another thread from changing buffer sizes */ 2196 mutex_lock(&buffer->mutex); 2197 2198 2199 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2200 /* 2201 * Don't succeed if resizing is disabled, as a reader might be 2202 * manipulating the ring buffer and is expecting a sane state while 2203 * this is true. 
2204 */ 2205 for_each_buffer_cpu(buffer, cpu) { 2206 cpu_buffer = buffer->buffers[cpu]; 2207 if (atomic_read(&cpu_buffer->resize_disabled)) { 2208 err = -EBUSY; 2209 goto out_err_unlock; 2210 } 2211 } 2212 2213 /* calculate the pages to update */ 2214 for_each_buffer_cpu(buffer, cpu) { 2215 cpu_buffer = buffer->buffers[cpu]; 2216 2217 cpu_buffer->nr_pages_to_update = nr_pages - 2218 cpu_buffer->nr_pages; 2219 /* 2220 * nothing more to do for removing pages or no update 2221 */ 2222 if (cpu_buffer->nr_pages_to_update <= 0) 2223 continue; 2224 /* 2225 * to add pages, make sure all new pages can be 2226 * allocated without receiving ENOMEM 2227 */ 2228 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2229 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2230 &cpu_buffer->new_pages)) { 2231 /* not enough memory for new pages */ 2232 err = -ENOMEM; 2233 goto out_err; 2234 } 2235 } 2236 2237 cpus_read_lock(); 2238 /* 2239 * Fire off all the required work handlers 2240 * We can't schedule on offline CPUs, but it's not necessary 2241 * since we can change their buffer sizes without any race. 2242 */ 2243 for_each_buffer_cpu(buffer, cpu) { 2244 cpu_buffer = buffer->buffers[cpu]; 2245 if (!cpu_buffer->nr_pages_to_update) 2246 continue; 2247 2248 /* Can't run something on an offline CPU. */ 2249 if (!cpu_online(cpu)) { 2250 rb_update_pages(cpu_buffer); 2251 cpu_buffer->nr_pages_to_update = 0; 2252 } else { 2253 /* Run directly if possible. */ 2254 migrate_disable(); 2255 if (cpu != smp_processor_id()) { 2256 migrate_enable(); 2257 schedule_work_on(cpu, 2258 &cpu_buffer->update_pages_work); 2259 } else { 2260 update_pages_handler(&cpu_buffer->update_pages_work); 2261 migrate_enable(); 2262 } 2263 } 2264 } 2265 2266 /* wait for all the updates to complete */ 2267 for_each_buffer_cpu(buffer, cpu) { 2268 cpu_buffer = buffer->buffers[cpu]; 2269 if (!cpu_buffer->nr_pages_to_update) 2270 continue; 2271 2272 if (cpu_online(cpu)) 2273 wait_for_completion(&cpu_buffer->update_done); 2274 cpu_buffer->nr_pages_to_update = 0; 2275 } 2276 2277 cpus_read_unlock(); 2278 } else { 2279 cpu_buffer = buffer->buffers[cpu_id]; 2280 2281 if (nr_pages == cpu_buffer->nr_pages) 2282 goto out; 2283 2284 /* 2285 * Don't succeed if resizing is disabled, as a reader might be 2286 * manipulating the ring buffer and is expecting a sane state while 2287 * this is true. 2288 */ 2289 if (atomic_read(&cpu_buffer->resize_disabled)) { 2290 err = -EBUSY; 2291 goto out_err_unlock; 2292 } 2293 2294 cpu_buffer->nr_pages_to_update = nr_pages - 2295 cpu_buffer->nr_pages; 2296 2297 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2298 if (cpu_buffer->nr_pages_to_update > 0 && 2299 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2300 &cpu_buffer->new_pages)) { 2301 err = -ENOMEM; 2302 goto out_err; 2303 } 2304 2305 cpus_read_lock(); 2306 2307 /* Can't run something on an offline CPU. */ 2308 if (!cpu_online(cpu_id)) 2309 rb_update_pages(cpu_buffer); 2310 else { 2311 /* Run directly if possible. */ 2312 migrate_disable(); 2313 if (cpu_id == smp_processor_id()) { 2314 rb_update_pages(cpu_buffer); 2315 migrate_enable(); 2316 } else { 2317 migrate_enable(); 2318 schedule_work_on(cpu_id, 2319 &cpu_buffer->update_pages_work); 2320 wait_for_completion(&cpu_buffer->update_done); 2321 } 2322 } 2323 2324 cpu_buffer->nr_pages_to_update = 0; 2325 cpus_read_unlock(); 2326 } 2327 2328 out: 2329 /* 2330 * The ring buffer resize can happen with the ring buffer 2331 * enabled, so that the update disturbs the tracing as little 2332 * as possible. 
But if the buffer is disabled, we do not need 2333 * to worry about that, and we can take the time to verify 2334 * that the buffer is not corrupt. 2335 */ 2336 if (atomic_read(&buffer->record_disabled)) { 2337 atomic_inc(&buffer->record_disabled); 2338 /* 2339 * Even though the buffer was disabled, we must make sure 2340 * that it is truly disabled before calling rb_check_pages. 2341 * There could have been a race between checking 2342 * record_disable and incrementing it. 2343 */ 2344 synchronize_rcu(); 2345 for_each_buffer_cpu(buffer, cpu) { 2346 cpu_buffer = buffer->buffers[cpu]; 2347 rb_check_pages(cpu_buffer); 2348 } 2349 atomic_dec(&buffer->record_disabled); 2350 } 2351 2352 mutex_unlock(&buffer->mutex); 2353 return 0; 2354 2355 out_err: 2356 for_each_buffer_cpu(buffer, cpu) { 2357 struct buffer_page *bpage, *tmp; 2358 2359 cpu_buffer = buffer->buffers[cpu]; 2360 cpu_buffer->nr_pages_to_update = 0; 2361 2362 if (list_empty(&cpu_buffer->new_pages)) 2363 continue; 2364 2365 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2366 list) { 2367 list_del_init(&bpage->list); 2368 free_buffer_page(bpage); 2369 } 2370 } 2371 out_err_unlock: 2372 mutex_unlock(&buffer->mutex); 2373 return err; 2374 } 2375 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2376 2377 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2378 { 2379 mutex_lock(&buffer->mutex); 2380 if (val) 2381 buffer->flags |= RB_FL_OVERWRITE; 2382 else 2383 buffer->flags &= ~RB_FL_OVERWRITE; 2384 mutex_unlock(&buffer->mutex); 2385 } 2386 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2387 2388 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2389 { 2390 return bpage->page->data + index; 2391 } 2392 2393 static __always_inline struct ring_buffer_event * 2394 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2395 { 2396 return __rb_page_index(cpu_buffer->reader_page, 2397 cpu_buffer->reader_page->read); 2398 } 2399 2400 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2401 { 2402 return local_read(&bpage->page->commit); 2403 } 2404 2405 static struct ring_buffer_event * 2406 rb_iter_head_event(struct ring_buffer_iter *iter) 2407 { 2408 struct ring_buffer_event *event; 2409 struct buffer_page *iter_head_page = iter->head_page; 2410 unsigned long commit; 2411 unsigned length; 2412 2413 if (iter->head != iter->next_event) 2414 return iter->event; 2415 2416 /* 2417 * When the writer goes across pages, it issues a cmpxchg which 2418 * is a mb(), which will synchronize with the rmb here. 2419 * (see rb_tail_page_update() and __rb_reserve_next()) 2420 */ 2421 commit = rb_page_commit(iter_head_page); 2422 smp_rmb(); 2423 event = __rb_page_index(iter_head_page, iter->head); 2424 length = rb_event_length(event); 2425 2426 /* 2427 * READ_ONCE() doesn't work on functions and we don't want the 2428 * compiler doing any crazy optimizations with length. 2429 */ 2430 barrier(); 2431 2432 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2433 /* Writer corrupted the read? */ 2434 goto reset; 2435 2436 memcpy(iter->event, event, length); 2437 /* 2438 * If the page stamp is still the same after this rmb() then the 2439 * event was safely copied without the writer entering the page. 
2440 */ 2441 smp_rmb(); 2442 2443 /* Make sure the page didn't change since we read this */ 2444 if (iter->page_stamp != iter_head_page->page->time_stamp || 2445 commit > rb_page_commit(iter_head_page)) 2446 goto reset; 2447 2448 iter->next_event = iter->head + length; 2449 return iter->event; 2450 reset: 2451 /* Reset to the beginning */ 2452 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2453 iter->head = 0; 2454 iter->next_event = 0; 2455 iter->missed_events = 1; 2456 return NULL; 2457 } 2458 2459 /* Size is determined by what has been committed */ 2460 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2461 { 2462 return rb_page_commit(bpage); 2463 } 2464 2465 static __always_inline unsigned 2466 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2467 { 2468 return rb_page_commit(cpu_buffer->commit_page); 2469 } 2470 2471 static __always_inline unsigned 2472 rb_event_index(struct ring_buffer_event *event) 2473 { 2474 unsigned long addr = (unsigned long)event; 2475 2476 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2477 } 2478 2479 static void rb_inc_iter(struct ring_buffer_iter *iter) 2480 { 2481 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2482 2483 /* 2484 * The iterator could be on the reader page (it starts there). 2485 * But the head could have moved, since the reader was 2486 * found. Check for this case and assign the iterator 2487 * to the head page instead of next. 2488 */ 2489 if (iter->head_page == cpu_buffer->reader_page) 2490 iter->head_page = rb_set_head_page(cpu_buffer); 2491 else 2492 rb_inc_page(&iter->head_page); 2493 2494 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2495 iter->head = 0; 2496 iter->next_event = 0; 2497 } 2498 2499 /* 2500 * rb_handle_head_page - writer hit the head page 2501 * 2502 * Returns: +1 to retry page 2503 * 0 to continue 2504 * -1 on error 2505 */ 2506 static int 2507 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2508 struct buffer_page *tail_page, 2509 struct buffer_page *next_page) 2510 { 2511 struct buffer_page *new_head; 2512 int entries; 2513 int type; 2514 int ret; 2515 2516 entries = rb_page_entries(next_page); 2517 2518 /* 2519 * The hard part is here. We need to move the head 2520 * forward, and protect against both readers on 2521 * other CPUs and writers coming in via interrupts. 2522 */ 2523 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2524 RB_PAGE_HEAD); 2525 2526 /* 2527 * type can be one of four: 2528 * NORMAL - an interrupt already moved it for us 2529 * HEAD - we are the first to get here. 2530 * UPDATE - we are the interrupt interrupting 2531 * a current move. 2532 * MOVED - a reader on another CPU moved the next 2533 * pointer to its reader page. Give up 2534 * and try again. 2535 */ 2536 2537 switch (type) { 2538 case RB_PAGE_HEAD: 2539 /* 2540 * We changed the head to UPDATE, thus 2541 * it is our responsibility to update 2542 * the counters. 2543 */ 2544 local_add(entries, &cpu_buffer->overrun); 2545 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2546 local_inc(&cpu_buffer->pages_lost); 2547 2548 /* 2549 * The entries will be zeroed out when we move the 2550 * tail page. 2551 */ 2552 2553 /* still more to do */ 2554 break; 2555 2556 case RB_PAGE_UPDATE: 2557 /* 2558 * This is an interrupt that interrupt the 2559 * previous update. Still more to do. 2560 */ 2561 break; 2562 case RB_PAGE_NORMAL: 2563 /* 2564 * An interrupt came in before the update 2565 * and processed this for us. 
2566 * Nothing left to do. 2567 */ 2568 return 1; 2569 case RB_PAGE_MOVED: 2570 /* 2571 * The reader is on another CPU and just did 2572 * a swap with our next_page. 2573 * Try again. 2574 */ 2575 return 1; 2576 default: 2577 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2578 return -1; 2579 } 2580 2581 /* 2582 * Now that we are here, the old head pointer is 2583 * set to UPDATE. This will keep the reader from 2584 * swapping the head page with the reader page. 2585 * The reader (on another CPU) will spin till 2586 * we are finished. 2587 * 2588 * We just need to protect against interrupts 2589 * doing the job. We will set the next pointer 2590 * to HEAD. After that, we set the old pointer 2591 * to NORMAL, but only if it was HEAD before. 2592 * otherwise we are an interrupt, and only 2593 * want the outer most commit to reset it. 2594 */ 2595 new_head = next_page; 2596 rb_inc_page(&new_head); 2597 2598 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2599 RB_PAGE_NORMAL); 2600 2601 /* 2602 * Valid returns are: 2603 * HEAD - an interrupt came in and already set it. 2604 * NORMAL - One of two things: 2605 * 1) We really set it. 2606 * 2) A bunch of interrupts came in and moved 2607 * the page forward again. 2608 */ 2609 switch (ret) { 2610 case RB_PAGE_HEAD: 2611 case RB_PAGE_NORMAL: 2612 /* OK */ 2613 break; 2614 default: 2615 RB_WARN_ON(cpu_buffer, 1); 2616 return -1; 2617 } 2618 2619 /* 2620 * It is possible that an interrupt came in, 2621 * set the head up, then more interrupts came in 2622 * and moved it again. When we get back here, 2623 * the page would have been set to NORMAL but we 2624 * just set it back to HEAD. 2625 * 2626 * How do you detect this? Well, if that happened 2627 * the tail page would have moved. 2628 */ 2629 if (ret == RB_PAGE_NORMAL) { 2630 struct buffer_page *buffer_tail_page; 2631 2632 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 2633 /* 2634 * If the tail had moved passed next, then we need 2635 * to reset the pointer. 2636 */ 2637 if (buffer_tail_page != tail_page && 2638 buffer_tail_page != next_page) 2639 rb_head_page_set_normal(cpu_buffer, new_head, 2640 next_page, 2641 RB_PAGE_HEAD); 2642 } 2643 2644 /* 2645 * If this was the outer most commit (the one that 2646 * changed the original pointer from HEAD to UPDATE), 2647 * then it is up to us to reset it to NORMAL. 2648 */ 2649 if (type == RB_PAGE_HEAD) { 2650 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2651 tail_page, 2652 RB_PAGE_UPDATE); 2653 if (RB_WARN_ON(cpu_buffer, 2654 ret != RB_PAGE_UPDATE)) 2655 return -1; 2656 } 2657 2658 return 0; 2659 } 2660 2661 static inline void 2662 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2663 unsigned long tail, struct rb_event_info *info) 2664 { 2665 struct buffer_page *tail_page = info->tail_page; 2666 struct ring_buffer_event *event; 2667 unsigned long length = info->length; 2668 2669 /* 2670 * Only the event that crossed the page boundary 2671 * must fill the old tail_page with padding. 2672 */ 2673 if (tail >= BUF_PAGE_SIZE) { 2674 /* 2675 * If the page was filled, then we still need 2676 * to update the real_end. Reset it to zero 2677 * and the reader will ignore it. 2678 */ 2679 if (tail == BUF_PAGE_SIZE) 2680 tail_page->real_end = 0; 2681 2682 local_sub(length, &tail_page->write); 2683 return; 2684 } 2685 2686 event = __rb_page_index(tail_page, tail); 2687 2688 /* account for padding bytes */ 2689 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2690 2691 /* 2692 * Save the original length to the meta data. 
2693 * This will be used by the reader to add lost event 2694 * counter. 2695 */ 2696 tail_page->real_end = tail; 2697 2698 /* 2699 * If this event is bigger than the minimum size, then 2700 * we need to be careful that we don't subtract the 2701 * write counter enough to allow another writer to slip 2702 * in on this page. 2703 * We put in a discarded commit instead, to make sure 2704 * that this space is not used again. 2705 * 2706 * If we are less than the minimum size, we don't need to 2707 * worry about it. 2708 */ 2709 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2710 /* No room for any events */ 2711 2712 /* Mark the rest of the page with padding */ 2713 rb_event_set_padding(event); 2714 2715 /* Make sure the padding is visible before the write update */ 2716 smp_wmb(); 2717 2718 /* Set the write back to the previous setting */ 2719 local_sub(length, &tail_page->write); 2720 return; 2721 } 2722 2723 /* Put in a discarded event */ 2724 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2725 event->type_len = RINGBUF_TYPE_PADDING; 2726 /* time delta must be non zero */ 2727 event->time_delta = 1; 2728 2729 /* Make sure the padding is visible before the tail_page->write update */ 2730 smp_wmb(); 2731 2732 /* Set write to end of buffer */ 2733 length = (tail + length) - BUF_PAGE_SIZE; 2734 local_sub(length, &tail_page->write); 2735 } 2736 2737 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2738 2739 /* 2740 * This is the slow path, force gcc not to inline it. 2741 */ 2742 static noinline struct ring_buffer_event * 2743 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2744 unsigned long tail, struct rb_event_info *info) 2745 { 2746 struct buffer_page *tail_page = info->tail_page; 2747 struct buffer_page *commit_page = cpu_buffer->commit_page; 2748 struct trace_buffer *buffer = cpu_buffer->buffer; 2749 struct buffer_page *next_page; 2750 int ret; 2751 2752 next_page = tail_page; 2753 2754 rb_inc_page(&next_page); 2755 2756 /* 2757 * If for some reason, we had an interrupt storm that made 2758 * it all the way around the buffer, bail, and warn 2759 * about it. 2760 */ 2761 if (unlikely(next_page == commit_page)) { 2762 local_inc(&cpu_buffer->commit_overrun); 2763 goto out_reset; 2764 } 2765 2766 /* 2767 * This is where the fun begins! 2768 * 2769 * We are fighting against races between a reader that 2770 * could be on another CPU trying to swap its reader 2771 * page with the buffer head. 2772 * 2773 * We are also fighting against interrupts coming in and 2774 * moving the head or tail on us as well. 2775 * 2776 * If the next page is the head page then we have filled 2777 * the buffer, unless the commit page is still on the 2778 * reader page. 2779 */ 2780 if (rb_is_head_page(next_page, &tail_page->list)) { 2781 2782 /* 2783 * If the commit is not on the reader page, then 2784 * move the header page. 2785 */ 2786 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2787 /* 2788 * If we are not in overwrite mode, 2789 * this is easy, just stop here. 2790 */ 2791 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2792 local_inc(&cpu_buffer->dropped_events); 2793 goto out_reset; 2794 } 2795 2796 ret = rb_handle_head_page(cpu_buffer, 2797 tail_page, 2798 next_page); 2799 if (ret < 0) 2800 goto out_reset; 2801 if (ret) 2802 goto out_again; 2803 } else { 2804 /* 2805 * We need to be careful here too. The 2806 * commit page could still be on the reader 2807 * page. 
We could have a small buffer, and 2808 * have filled up the buffer with events 2809 * from interrupts and such, and wrapped. 2810 * 2811 * Note, if the tail page is also on the 2812 * reader_page, we let it move out. 2813 */ 2814 if (unlikely((cpu_buffer->commit_page != 2815 cpu_buffer->tail_page) && 2816 (cpu_buffer->commit_page == 2817 cpu_buffer->reader_page))) { 2818 local_inc(&cpu_buffer->commit_overrun); 2819 goto out_reset; 2820 } 2821 } 2822 } 2823 2824 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2825 2826 out_again: 2827 2828 rb_reset_tail(cpu_buffer, tail, info); 2829 2830 /* Commit what we have for now. */ 2831 rb_end_commit(cpu_buffer); 2832 /* rb_end_commit() decs committing */ 2833 local_inc(&cpu_buffer->committing); 2834 2835 /* fail and let the caller try again */ 2836 return ERR_PTR(-EAGAIN); 2837 2838 out_reset: 2839 /* reset write */ 2840 rb_reset_tail(cpu_buffer, tail, info); 2841 2842 return NULL; 2843 } 2844 2845 /* Slow path */ 2846 static struct ring_buffer_event * 2847 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2848 { 2849 if (abs) 2850 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2851 else 2852 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2853 2854 /* Not the first event on the page, or not delta? */ 2855 if (abs || rb_event_index(event)) { 2856 event->time_delta = delta & TS_MASK; 2857 event->array[0] = delta >> TS_SHIFT; 2858 } else { 2859 /* nope, just zero it */ 2860 event->time_delta = 0; 2861 event->array[0] = 0; 2862 } 2863 2864 return skip_time_extend(event); 2865 } 2866 2867 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2868 static inline bool sched_clock_stable(void) 2869 { 2870 return true; 2871 } 2872 #endif 2873 2874 static void 2875 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2876 struct rb_event_info *info) 2877 { 2878 u64 write_stamp; 2879 2880 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2881 (unsigned long long)info->delta, 2882 (unsigned long long)info->ts, 2883 (unsigned long long)info->before, 2884 (unsigned long long)info->after, 2885 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2886 sched_clock_stable() ? "" : 2887 "If you just came from a suspend/resume,\n" 2888 "please switch to the trace global clock:\n" 2889 " echo global > /sys/kernel/debug/tracing/trace_clock\n" 2890 "or add trace_clock=global to the kernel command line\n"); 2891 } 2892 2893 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2894 struct ring_buffer_event **event, 2895 struct rb_event_info *info, 2896 u64 *delta, 2897 unsigned int *length) 2898 { 2899 bool abs = info->add_timestamp & 2900 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2901 2902 if (unlikely(info->delta > (1ULL << 59))) { 2903 /* 2904 * Some timers can use more than 59 bits, and when a timestamp 2905 * is added to the buffer, it will lose those bits. 2906 */ 2907 if (abs && (info->ts & TS_MSB)) { 2908 info->delta &= ABS_TS_MASK; 2909 2910 /* did the clock go backwards */ 2911 } else if (info->before == info->after && info->before > info->ts) { 2912 /* not interrupted */ 2913 static int once; 2914 2915 /* 2916 * This is possible with a recalibrating of the TSC. 2917 * Do not produce a call stack, but just report it. 
2918 */ 2919 if (!once) { 2920 once++; 2921 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2922 info->before, info->ts); 2923 } 2924 } else 2925 rb_check_timestamp(cpu_buffer, info); 2926 if (!abs) 2927 info->delta = 0; 2928 } 2929 *event = rb_add_time_stamp(*event, info->delta, abs); 2930 *length -= RB_LEN_TIME_EXTEND; 2931 *delta = 0; 2932 } 2933 2934 /** 2935 * rb_update_event - update event type and data 2936 * @cpu_buffer: The per cpu buffer of the @event 2937 * @event: the event to update 2938 * @info: The info to update the @event with (contains length and delta) 2939 * 2940 * Update the type and data fields of the @event. The length 2941 * is the actual size that is written to the ring buffer, 2942 * and with this, we can determine what to place into the 2943 * data field. 2944 */ 2945 static void 2946 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2947 struct ring_buffer_event *event, 2948 struct rb_event_info *info) 2949 { 2950 unsigned length = info->length; 2951 u64 delta = info->delta; 2952 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2953 2954 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2955 cpu_buffer->event_stamp[nest] = info->ts; 2956 2957 /* 2958 * If we need to add a timestamp, then we 2959 * add it to the start of the reserved space. 2960 */ 2961 if (unlikely(info->add_timestamp)) 2962 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2963 2964 event->time_delta = delta; 2965 length -= RB_EVNT_HDR_SIZE; 2966 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2967 event->type_len = 0; 2968 event->array[0] = length; 2969 } else 2970 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2971 } 2972 2973 static unsigned rb_calculate_event_length(unsigned length) 2974 { 2975 struct ring_buffer_event event; /* Used only for sizeof array */ 2976 2977 /* zero length can cause confusions */ 2978 if (!length) 2979 length++; 2980 2981 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2982 length += sizeof(event.array[0]); 2983 2984 length += RB_EVNT_HDR_SIZE; 2985 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2986 2987 /* 2988 * In case the time delta is larger than the 27 bits for it 2989 * in the header, we need to add a timestamp. If another 2990 * event comes in when trying to discard this one to increase 2991 * the length, then the timestamp will be added in the allocated 2992 * space of this event. If length is bigger than the size needed 2993 * for the TIME_EXTEND, then padding has to be used. The events 2994 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2995 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2996 * As length is a multiple of 4, we only need to worry if it 2997 * is 12 (RB_LEN_TIME_EXTEND + 4). 
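 *
 * Worked example (illustrative, assuming 4 byte arch alignment and a
 * payload small enough to skip the extra length word): an 8 byte
 * payload becomes 8 + 4 (header) = 12, which the check below bumps to
 * 16, so a discarded event of that size can still hold a TIME_EXTEND
 * plus a minimal padding event.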
2998 */ 2999 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 3000 length += RB_ALIGNMENT; 3001 3002 return length; 3003 } 3004 3005 static u64 rb_time_delta(struct ring_buffer_event *event) 3006 { 3007 switch (event->type_len) { 3008 case RINGBUF_TYPE_PADDING: 3009 return 0; 3010 3011 case RINGBUF_TYPE_TIME_EXTEND: 3012 return rb_event_time_stamp(event); 3013 3014 case RINGBUF_TYPE_TIME_STAMP: 3015 return 0; 3016 3017 case RINGBUF_TYPE_DATA: 3018 return event->time_delta; 3019 default: 3020 return 0; 3021 } 3022 } 3023 3024 static inline int 3025 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 3026 struct ring_buffer_event *event) 3027 { 3028 unsigned long new_index, old_index; 3029 struct buffer_page *bpage; 3030 unsigned long index; 3031 unsigned long addr; 3032 u64 write_stamp; 3033 u64 delta; 3034 3035 new_index = rb_event_index(event); 3036 old_index = new_index + rb_event_ts_length(event); 3037 addr = (unsigned long)event; 3038 addr &= PAGE_MASK; 3039 3040 bpage = READ_ONCE(cpu_buffer->tail_page); 3041 3042 delta = rb_time_delta(event); 3043 3044 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) 3045 return 0; 3046 3047 /* Make sure the write stamp is read before testing the location */ 3048 barrier(); 3049 3050 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 3051 unsigned long write_mask = 3052 local_read(&bpage->write) & ~RB_WRITE_MASK; 3053 unsigned long event_length = rb_event_length(event); 3054 3055 /* Something came in, can't discard */ 3056 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, 3057 write_stamp, write_stamp - delta)) 3058 return 0; 3059 3060 /* 3061 * It's possible that the event time delta is zero 3062 * (has the same time stamp as the previous event) 3063 * in which case write_stamp and before_stamp could 3064 * be the same. In such a case, force before_stamp 3065 * to be different than write_stamp. It doesn't 3066 * matter what it is, as long as its different. 3067 */ 3068 if (!delta) 3069 rb_time_set(&cpu_buffer->before_stamp, 0); 3070 3071 /* 3072 * If an event were to come in now, it would see that the 3073 * write_stamp and the before_stamp are different, and assume 3074 * that this event just added itself before updating 3075 * the write stamp. The interrupting event will fix the 3076 * write stamp for us, and use the before stamp as its delta. 3077 */ 3078 3079 /* 3080 * This is on the tail page. It is possible that 3081 * a write could come in and move the tail page 3082 * and write to the next page. That is fine 3083 * because we just shorten what is on this page. 3084 */ 3085 old_index += write_mask; 3086 new_index += write_mask; 3087 index = local_cmpxchg(&bpage->write, old_index, new_index); 3088 if (index == old_index) { 3089 /* update counters */ 3090 local_sub(event_length, &cpu_buffer->entries_bytes); 3091 return 1; 3092 } 3093 } 3094 3095 /* could not discard */ 3096 return 0; 3097 } 3098 3099 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3100 { 3101 local_inc(&cpu_buffer->committing); 3102 local_inc(&cpu_buffer->commits); 3103 } 3104 3105 static __always_inline void 3106 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3107 { 3108 unsigned long max_count; 3109 3110 /* 3111 * We only race with interrupts and NMIs on this CPU. 3112 * If we own the commit event, then we can commit 3113 * all others that interrupted us, since the interruptions 3114 * are in stack format (they finish before they come 3115 * back to us). 
This allows us to do a simple loop to 3116 * assign the commit to the tail. 3117 */ 3118 again: 3119 max_count = cpu_buffer->nr_pages * 100; 3120 3121 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3122 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3123 return; 3124 if (RB_WARN_ON(cpu_buffer, 3125 rb_is_reader_page(cpu_buffer->tail_page))) 3126 return; 3127 local_set(&cpu_buffer->commit_page->page->commit, 3128 rb_page_write(cpu_buffer->commit_page)); 3129 rb_inc_page(&cpu_buffer->commit_page); 3130 /* add barrier to keep gcc from optimizing too much */ 3131 barrier(); 3132 } 3133 while (rb_commit_index(cpu_buffer) != 3134 rb_page_write(cpu_buffer->commit_page)) { 3135 3136 local_set(&cpu_buffer->commit_page->page->commit, 3137 rb_page_write(cpu_buffer->commit_page)); 3138 RB_WARN_ON(cpu_buffer, 3139 local_read(&cpu_buffer->commit_page->page->commit) & 3140 ~RB_WRITE_MASK); 3141 barrier(); 3142 } 3143 3144 /* again, keep gcc from optimizing */ 3145 barrier(); 3146 3147 /* 3148 * If an interrupt came in just after the first while loop 3149 * and pushed the tail page forward, we will be left with 3150 * a dangling commit that will never go forward. 3151 */ 3152 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3153 goto again; 3154 } 3155 3156 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3157 { 3158 unsigned long commits; 3159 3160 if (RB_WARN_ON(cpu_buffer, 3161 !local_read(&cpu_buffer->committing))) 3162 return; 3163 3164 again: 3165 commits = local_read(&cpu_buffer->commits); 3166 /* synchronize with interrupts */ 3167 barrier(); 3168 if (local_read(&cpu_buffer->committing) == 1) 3169 rb_set_commit_to_write(cpu_buffer); 3170 3171 local_dec(&cpu_buffer->committing); 3172 3173 /* synchronize with interrupts */ 3174 barrier(); 3175 3176 /* 3177 * Need to account for interrupts coming in between the 3178 * updating of the commit page and the clearing of the 3179 * committing counter. 
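 *
 * Illustrative scenario: an interrupt fires after
 * rb_set_commit_to_write() ran but before 'committing' was cleared
 * above, and it reserves and commits its own event. Its nested
 * rb_end_commit() sees committing > 1 and does not push the commit
 * page itself, so the recheck below notices that 'commits' changed
 * while 'committing' is now zero and loops to commit that event too.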
3180 */ 3181 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3182 !local_read(&cpu_buffer->committing)) { 3183 local_inc(&cpu_buffer->committing); 3184 goto again; 3185 } 3186 } 3187 3188 static inline void rb_event_discard(struct ring_buffer_event *event) 3189 { 3190 if (extended_time(event)) 3191 event = skip_time_extend(event); 3192 3193 /* array[0] holds the actual length for the discarded event */ 3194 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3195 event->type_len = RINGBUF_TYPE_PADDING; 3196 /* time delta must be non zero */ 3197 if (!event->time_delta) 3198 event->time_delta = 1; 3199 } 3200 3201 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3202 { 3203 local_inc(&cpu_buffer->entries); 3204 rb_end_commit(cpu_buffer); 3205 } 3206 3207 static __always_inline void 3208 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3209 { 3210 if (buffer->irq_work.waiters_pending) { 3211 buffer->irq_work.waiters_pending = false; 3212 /* irq_work_queue() supplies its own memory barriers */ 3213 irq_work_queue(&buffer->irq_work.work); 3214 } 3215 3216 if (cpu_buffer->irq_work.waiters_pending) { 3217 cpu_buffer->irq_work.waiters_pending = false; 3218 /* irq_work_queue() supplies its own memory barriers */ 3219 irq_work_queue(&cpu_buffer->irq_work.work); 3220 } 3221 3222 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3223 return; 3224 3225 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3226 return; 3227 3228 if (!cpu_buffer->irq_work.full_waiters_pending) 3229 return; 3230 3231 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3232 3233 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 3234 return; 3235 3236 cpu_buffer->irq_work.wakeup_full = true; 3237 cpu_buffer->irq_work.full_waiters_pending = false; 3238 /* irq_work_queue() supplies its own memory barriers */ 3239 irq_work_queue(&cpu_buffer->irq_work.work); 3240 } 3241 3242 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3243 # define do_ring_buffer_record_recursion() \ 3244 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3245 #else 3246 # define do_ring_buffer_record_recursion() do { } while (0) 3247 #endif 3248 3249 /* 3250 * The lock and unlock are done within a preempt disable section. 3251 * The current_context per_cpu variable can only be modified 3252 * by the current task between lock and unlock. But it can 3253 * be modified more than once via an interrupt. To pass this 3254 * information from the lock to the unlock without having to 3255 * access the 'in_interrupt()' functions again (which do show 3256 * a bit of overhead in something as critical as function tracing), 3257 * we use a bitmask trick. 3258 * 3259 * bit 1 = NMI context 3260 * bit 2 = IRQ context 3261 * bit 3 = SoftIRQ context 3262 * bit 4 = normal context. 3263 * 3264 * This works because this is the order of contexts that can 3265 * preempt other contexts. A SoftIRQ never preempts an IRQ 3266 * context. 3267 * 3268 * When the context is determined, the corresponding bit is 3269 * checked and set (if it was set, then a recursion of that context 3270 * happened). 3271 * 3272 * On unlock, we need to clear this bit. To do so, just subtract 3273 * 1 from the current_context and AND it to itself.
3274 * 3275 * (binary) 3276 * 101 - 1 = 100 3277 * 101 & 100 = 100 (clearing bit zero) 3278 * 3279 * 1010 - 1 = 1001 3280 * 1010 & 1001 = 1000 (clearing bit 1) 3281 * 3282 * The least significant bit can be cleared this way, and it 3283 * just so happens that it is the same bit corresponding to 3284 * the current context. 3285 * 3286 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3287 * is set when a recursion is detected at the current context, and if 3288 * the TRANSITION bit is already set, it will fail the recursion. 3289 * This is needed because there's a lag between the changing of 3290 * interrupt context and updating the preempt count. In this case, 3291 * a false positive will be found. To handle this, one extra recursion 3292 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3293 * bit is already set, then it is considered a recursion and the function 3294 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3295 * 3296 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3297 * to be cleared. Even if it wasn't the context that set it. That is, 3298 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3299 * is called before preempt_count() is updated, since the check will 3300 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3301 * NMI then comes in, it will set the NMI bit, but when the NMI code 3302 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3303 * and leave the NMI bit set. But this is fine, because the interrupt 3304 * code that set the TRANSITION bit will then clear the NMI bit when it 3305 * calls trace_recursive_unlock(). If another NMI comes in, it will 3306 * set the TRANSITION bit and continue. 3307 * 3308 * Note: The TRANSITION bit only handles a single transition between context. 3309 */ 3310 3311 static __always_inline int 3312 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3313 { 3314 unsigned int val = cpu_buffer->current_context; 3315 int bit = interrupt_context_level(); 3316 3317 bit = RB_CTX_NORMAL - bit; 3318 3319 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3320 /* 3321 * It is possible that this was called by transitioning 3322 * between interrupt context, and preempt_count() has not 3323 * been updated yet. In this case, use the TRANSITION bit. 3324 */ 3325 bit = RB_CTX_TRANSITION; 3326 if (val & (1 << (bit + cpu_buffer->nest))) { 3327 do_ring_buffer_record_recursion(); 3328 return 1; 3329 } 3330 } 3331 3332 val |= (1 << (bit + cpu_buffer->nest)); 3333 cpu_buffer->current_context = val; 3334 3335 return 0; 3336 } 3337 3338 static __always_inline void 3339 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3340 { 3341 cpu_buffer->current_context &= 3342 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3343 } 3344 3345 /* The recursive locking above uses 5 bits */ 3346 #define NESTED_BITS 5 3347 3348 /** 3349 * ring_buffer_nest_start - Allow to trace while nested 3350 * @buffer: The ring buffer to modify 3351 * 3352 * The ring buffer has a safety mechanism to prevent recursion. 3353 * But there may be a case where a trace needs to be done while 3354 * tracing something else. In this case, calling this function 3355 * will allow this function to nest within a currently active 3356 * ring_buffer_lock_reserve(). 3357 * 3358 * Call this function before calling another ring_buffer_lock_reserve() and 3359 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 
3360 */ 3361 void ring_buffer_nest_start(struct trace_buffer *buffer) 3362 { 3363 struct ring_buffer_per_cpu *cpu_buffer; 3364 int cpu; 3365 3366 /* Enabled by ring_buffer_nest_end() */ 3367 preempt_disable_notrace(); 3368 cpu = raw_smp_processor_id(); 3369 cpu_buffer = buffer->buffers[cpu]; 3370 /* This is the shift value for the above recursive locking */ 3371 cpu_buffer->nest += NESTED_BITS; 3372 } 3373 3374 /** 3375 * ring_buffer_nest_end - Allow to trace while nested 3376 * @buffer: The ring buffer to modify 3377 * 3378 * Must be called after ring_buffer_nest_start() and after the 3379 * ring_buffer_unlock_commit(). 3380 */ 3381 void ring_buffer_nest_end(struct trace_buffer *buffer) 3382 { 3383 struct ring_buffer_per_cpu *cpu_buffer; 3384 int cpu; 3385 3386 /* disabled by ring_buffer_nest_start() */ 3387 cpu = raw_smp_processor_id(); 3388 cpu_buffer = buffer->buffers[cpu]; 3389 /* This is the shift value for the above recursive locking */ 3390 cpu_buffer->nest -= NESTED_BITS; 3391 preempt_enable_notrace(); 3392 } 3393 3394 /** 3395 * ring_buffer_unlock_commit - commit a reserved 3396 * @buffer: The buffer to commit to 3397 * @event: The event pointer to commit. 3398 * 3399 * This commits the data to the ring buffer, and releases any locks held. 3400 * 3401 * Must be paired with ring_buffer_lock_reserve. 3402 */ 3403 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3404 { 3405 struct ring_buffer_per_cpu *cpu_buffer; 3406 int cpu = raw_smp_processor_id(); 3407 3408 cpu_buffer = buffer->buffers[cpu]; 3409 3410 rb_commit(cpu_buffer); 3411 3412 rb_wakeups(buffer, cpu_buffer); 3413 3414 trace_recursive_unlock(cpu_buffer); 3415 3416 preempt_enable_notrace(); 3417 3418 return 0; 3419 } 3420 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3421 3422 /* Special value to validate all deltas on a page. */ 3423 #define CHECK_FULL_PAGE 1L 3424 3425 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3426 static void dump_buffer_page(struct buffer_data_page *bpage, 3427 struct rb_event_info *info, 3428 unsigned long tail) 3429 { 3430 struct ring_buffer_event *event; 3431 u64 ts, delta; 3432 int e; 3433 3434 ts = bpage->time_stamp; 3435 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3436 3437 for (e = 0; e < tail; e += rb_event_length(event)) { 3438 3439 event = (struct ring_buffer_event *)(bpage->data + e); 3440 3441 switch (event->type_len) { 3442 3443 case RINGBUF_TYPE_TIME_EXTEND: 3444 delta = rb_event_time_stamp(event); 3445 ts += delta; 3446 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3447 break; 3448 3449 case RINGBUF_TYPE_TIME_STAMP: 3450 delta = rb_event_time_stamp(event); 3451 ts = rb_fix_abs_ts(delta, ts); 3452 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3453 break; 3454 3455 case RINGBUF_TYPE_PADDING: 3456 ts += event->time_delta; 3457 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3458 break; 3459 3460 case RINGBUF_TYPE_DATA: 3461 ts += event->time_delta; 3462 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3463 break; 3464 3465 default: 3466 break; 3467 } 3468 } 3469 } 3470 3471 static DEFINE_PER_CPU(atomic_t, checking); 3472 static atomic_t ts_dump; 3473 3474 /* 3475 * Check if the current event time stamp matches the deltas on 3476 * the buffer page. 
3477 */ 3478 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3479 struct rb_event_info *info, 3480 unsigned long tail) 3481 { 3482 struct ring_buffer_event *event; 3483 struct buffer_data_page *bpage; 3484 u64 ts, delta; 3485 bool full = false; 3486 int e; 3487 3488 bpage = info->tail_page->page; 3489 3490 if (tail == CHECK_FULL_PAGE) { 3491 full = true; 3492 tail = local_read(&bpage->commit); 3493 } else if (info->add_timestamp & 3494 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 3495 /* Ignore events with absolute time stamps */ 3496 return; 3497 } 3498 3499 /* 3500 * Do not check the first event (skip possible extends too). 3501 * Also do not check if previous events have not been committed. 3502 */ 3503 if (tail <= 8 || tail > local_read(&bpage->commit)) 3504 return; 3505 3506 /* 3507 * If this interrupted another event, skip this check. 3508 */ 3509 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 3510 goto out; 3511 3512 ts = bpage->time_stamp; 3513 3514 for (e = 0; e < tail; e += rb_event_length(event)) { 3515 3516 event = (struct ring_buffer_event *)(bpage->data + e); 3517 3518 switch (event->type_len) { 3519 3520 case RINGBUF_TYPE_TIME_EXTEND: 3521 delta = rb_event_time_stamp(event); 3522 ts += delta; 3523 break; 3524 3525 case RINGBUF_TYPE_TIME_STAMP: 3526 delta = rb_event_time_stamp(event); 3527 ts = rb_fix_abs_ts(delta, ts); 3528 break; 3529 3530 case RINGBUF_TYPE_PADDING: 3531 if (event->time_delta == 1) 3532 break; 3533 fallthrough; 3534 case RINGBUF_TYPE_DATA: 3535 ts += event->time_delta; 3536 break; 3537 3538 default: 3539 RB_WARN_ON(cpu_buffer, 1); 3540 } 3541 } 3542 if ((full && ts > info->ts) || 3543 (!full && ts + info->delta != info->ts)) { 3544 /* If another report is happening, ignore this one */ 3545 if (atomic_inc_return(&ts_dump) != 1) { 3546 atomic_dec(&ts_dump); 3547 goto out; 3548 } 3549 atomic_inc(&cpu_buffer->record_disabled); 3550 /* There are some cases during boot up where this can happen */ 3551 WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 3552 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 3553 cpu_buffer->cpu, 3554 ts + info->delta, info->ts, info->delta, 3555 info->before, info->after, 3556 full ?
" (full)" : ""); 3557 dump_buffer_page(bpage, info, tail); 3558 atomic_dec(&ts_dump); 3559 /* Do not re-enable checking */ 3560 return; 3561 } 3562 out: 3563 atomic_dec(this_cpu_ptr(&checking)); 3564 } 3565 #else 3566 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3567 struct rb_event_info *info, 3568 unsigned long tail) 3569 { 3570 } 3571 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 3572 3573 static struct ring_buffer_event * 3574 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3575 struct rb_event_info *info) 3576 { 3577 struct ring_buffer_event *event; 3578 struct buffer_page *tail_page; 3579 unsigned long tail, write, w; 3580 bool a_ok; 3581 bool b_ok; 3582 3583 /* Don't let the compiler play games with cpu_buffer->tail_page */ 3584 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3585 3586 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3587 barrier(); 3588 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3589 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3590 barrier(); 3591 info->ts = rb_time_stamp(cpu_buffer->buffer); 3592 3593 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3594 info->delta = info->ts; 3595 } else { 3596 /* 3597 * If interrupting an event time update, we may need an 3598 * absolute timestamp. 3599 * Don't bother if this is the start of a new page (w == 0). 3600 */ 3601 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { 3602 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3603 info->length += RB_LEN_TIME_EXTEND; 3604 } else { 3605 info->delta = info->ts - info->after; 3606 if (unlikely(test_time_stamp(info->delta))) { 3607 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 3608 info->length += RB_LEN_TIME_EXTEND; 3609 } 3610 } 3611 } 3612 3613 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3614 3615 /*C*/ write = local_add_return(info->length, &tail_page->write); 3616 3617 /* set write to only the index of the write */ 3618 write &= RB_WRITE_MASK; 3619 3620 tail = write - info->length; 3621 3622 /* See if we shot pass the end of this buffer page */ 3623 if (unlikely(write > BUF_PAGE_SIZE)) { 3624 /* before and after may now different, fix it up*/ 3625 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3626 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3627 if (a_ok && b_ok && info->before != info->after) 3628 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, 3629 info->before, info->after); 3630 if (a_ok && b_ok) 3631 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3632 return rb_move_tail(cpu_buffer, tail, info); 3633 } 3634 3635 if (likely(tail == w)) { 3636 u64 save_before; 3637 bool s_ok; 3638 3639 /* Nothing interrupted us between A and C */ 3640 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3641 barrier(); 3642 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); 3643 RB_WARN_ON(cpu_buffer, !s_ok); 3644 if (likely(!(info->add_timestamp & 3645 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3646 /* This did not interrupt any time update */ 3647 info->delta = info->ts - info->after; 3648 else 3649 /* Just use full timestamp for interrupting event */ 3650 info->delta = info->ts; 3651 barrier(); 3652 check_buffer(cpu_buffer, info, tail); 3653 if (unlikely(info->ts != save_before)) { 3654 /* SLOW PATH - Interrupted between C and E */ 3655 3656 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3657 RB_WARN_ON(cpu_buffer, !a_ok); 3658 3659 /* Write stamp must only go 
forward */ 3660 if (save_before > info->after) { 3661 /* 3662 * We do not care about the result, only that 3663 * it gets updated atomically. 3664 */ 3665 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, 3666 info->after, save_before); 3667 } 3668 } 3669 } else { 3670 u64 ts; 3671 /* SLOW PATH - Interrupted between A and C */ 3672 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3673 /* Was interrupted before here, write_stamp must be valid */ 3674 RB_WARN_ON(cpu_buffer, !a_ok); 3675 ts = rb_time_stamp(cpu_buffer->buffer); 3676 barrier(); 3677 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3678 info->after < ts && 3679 rb_time_cmpxchg(&cpu_buffer->write_stamp, 3680 info->after, ts)) { 3681 /* Nothing came after this event between C and E */ 3682 info->delta = ts - info->after; 3683 } else { 3684 /* 3685 * Interrupted between C and E: 3686 * Lost the previous events time stamp. Just set the 3687 * delta to zero, and this will be the same time as 3688 * the event this event interrupted. And the events that 3689 * came after this will still be correct (as they would 3690 * have built their delta on the previous event. 3691 */ 3692 info->delta = 0; 3693 } 3694 info->ts = ts; 3695 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3696 } 3697 3698 /* 3699 * If this is the first commit on the page, then it has the same 3700 * timestamp as the page itself. 3701 */ 3702 if (unlikely(!tail && !(info->add_timestamp & 3703 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3704 info->delta = 0; 3705 3706 /* We reserved something on the buffer */ 3707 3708 event = __rb_page_index(tail_page, tail); 3709 rb_update_event(cpu_buffer, event, info); 3710 3711 local_inc(&tail_page->entries); 3712 3713 /* 3714 * If this is the first commit on the page, then update 3715 * its timestamp. 3716 */ 3717 if (unlikely(!tail)) 3718 tail_page->page->time_stamp = info->ts; 3719 3720 /* account for these added bytes */ 3721 local_add(info->length, &cpu_buffer->entries_bytes); 3722 3723 return event; 3724 } 3725 3726 static __always_inline struct ring_buffer_event * 3727 rb_reserve_next_event(struct trace_buffer *buffer, 3728 struct ring_buffer_per_cpu *cpu_buffer, 3729 unsigned long length) 3730 { 3731 struct ring_buffer_event *event; 3732 struct rb_event_info info; 3733 int nr_loops = 0; 3734 int add_ts_default; 3735 3736 rb_start_commit(cpu_buffer); 3737 /* The commit page can not change after this */ 3738 3739 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3740 /* 3741 * Due to the ability to swap a cpu buffer from a buffer 3742 * it is possible it was swapped before we committed. 3743 * (committing stops a swap). We check for it here and 3744 * if it happened, we have to fail the write. 3745 */ 3746 barrier(); 3747 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 3748 local_dec(&cpu_buffer->committing); 3749 local_dec(&cpu_buffer->commits); 3750 return NULL; 3751 } 3752 #endif 3753 3754 info.length = rb_calculate_event_length(length); 3755 3756 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 3757 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 3758 info.length += RB_LEN_TIME_EXTEND; 3759 } else { 3760 add_ts_default = RB_ADD_STAMP_NONE; 3761 } 3762 3763 again: 3764 info.add_timestamp = add_ts_default; 3765 info.delta = 0; 3766 3767 /* 3768 * We allow for interrupts to reenter here and do a trace. 3769 * If one does, it will cause this original code to loop 3770 * back here. Even with heavy interrupts happening, this 3771 * should only happen a few times in a row. 
If this happens 3772 * 1000 times in a row, there must be either an interrupt 3773 * storm or we have something buggy. 3774 * Bail! 3775 */ 3776 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3777 goto out_fail; 3778 3779 event = __rb_reserve_next(cpu_buffer, &info); 3780 3781 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3782 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3783 info.length -= RB_LEN_TIME_EXTEND; 3784 goto again; 3785 } 3786 3787 if (likely(event)) 3788 return event; 3789 out_fail: 3790 rb_end_commit(cpu_buffer); 3791 return NULL; 3792 } 3793 3794 /** 3795 * ring_buffer_lock_reserve - reserve a part of the buffer 3796 * @buffer: the ring buffer to reserve from 3797 * @length: the length of the data to reserve (excluding event header) 3798 * 3799 * Returns a reserved event on the ring buffer to copy directly to. 3800 * The user of this interface will need to get the body to write into 3801 * and can use the ring_buffer_event_data() interface. 3802 * 3803 * The length is the length of the data needed, not the event length 3804 * which also includes the event header. 3805 * 3806 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3807 * If NULL is returned, then nothing has been allocated or locked. 3808 */ 3809 struct ring_buffer_event * 3810 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3811 { 3812 struct ring_buffer_per_cpu *cpu_buffer; 3813 struct ring_buffer_event *event; 3814 int cpu; 3815 3816 /* If we are tracing schedule, we don't want to recurse */ 3817 preempt_disable_notrace(); 3818 3819 if (unlikely(atomic_read(&buffer->record_disabled))) 3820 goto out; 3821 3822 cpu = raw_smp_processor_id(); 3823 3824 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3825 goto out; 3826 3827 cpu_buffer = buffer->buffers[cpu]; 3828 3829 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3830 goto out; 3831 3832 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3833 goto out; 3834 3835 if (unlikely(trace_recursive_lock(cpu_buffer))) 3836 goto out; 3837 3838 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3839 if (!event) 3840 goto out_unlock; 3841 3842 return event; 3843 3844 out_unlock: 3845 trace_recursive_unlock(cpu_buffer); 3846 out: 3847 preempt_enable_notrace(); 3848 return NULL; 3849 } 3850 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3851 3852 /* 3853 * Decrement the entries to the page that an event is on. 3854 * The event does not even need to exist, only the pointer 3855 * to the page it is on. This may only be called before the commit 3856 * takes place. 3857 */ 3858 static inline void 3859 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3860 struct ring_buffer_event *event) 3861 { 3862 unsigned long addr = (unsigned long)event; 3863 struct buffer_page *bpage = cpu_buffer->commit_page; 3864 struct buffer_page *start; 3865 3866 addr &= PAGE_MASK; 3867 3868 /* Do the likely case first */ 3869 if (likely(bpage->page == (void *)addr)) { 3870 local_dec(&bpage->entries); 3871 return; 3872 } 3873 3874 /* 3875 * Because the commit page may be on the reader page we 3876 * start with the next page and check the end loop there. 3877 */ 3878 rb_inc_page(&bpage); 3879 start = bpage; 3880 do { 3881 if (bpage->page == (void *)addr) { 3882 local_dec(&bpage->entries); 3883 return; 3884 } 3885 rb_inc_page(&bpage); 3886 } while (bpage != start); 3887 3888 /* commit not part of this buffer?? 
*/ 3889 RB_WARN_ON(cpu_buffer, 1); 3890 } 3891 3892 /** 3893 * ring_buffer_discard_commit - discard an event that has not been committed 3894 * @buffer: the ring buffer 3895 * @event: non committed event to discard 3896 * 3897 * Sometimes an event that is in the ring buffer needs to be ignored. 3898 * This function lets the user discard an event in the ring buffer 3899 * and then that event will not be read later. 3900 * 3901 * This function only works if it is called before the item has been 3902 * committed. It will try to free the event from the ring buffer 3903 * if another event has not been added behind it. 3904 * 3905 * If another event has been added behind it, it will set the event 3906 * up as discarded, and perform the commit. 3907 * 3908 * If this function is called, do not call ring_buffer_unlock_commit on 3909 * the event. 3910 */ 3911 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3912 struct ring_buffer_event *event) 3913 { 3914 struct ring_buffer_per_cpu *cpu_buffer; 3915 int cpu; 3916 3917 /* The event is discarded regardless */ 3918 rb_event_discard(event); 3919 3920 cpu = smp_processor_id(); 3921 cpu_buffer = buffer->buffers[cpu]; 3922 3923 /* 3924 * This must only be called if the event has not been 3925 * committed yet. Thus we can assume that preemption 3926 * is still disabled. 3927 */ 3928 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3929 3930 rb_decrement_entry(cpu_buffer, event); 3931 if (rb_try_to_discard(cpu_buffer, event)) 3932 goto out; 3933 3934 out: 3935 rb_end_commit(cpu_buffer); 3936 3937 trace_recursive_unlock(cpu_buffer); 3938 3939 preempt_enable_notrace(); 3940 3941 } 3942 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3943 3944 /** 3945 * ring_buffer_write - write data to the buffer without reserving 3946 * @buffer: The ring buffer to write to. 3947 * @length: The length of the data being written (excluding the event header) 3948 * @data: The data to write to the buffer. 3949 * 3950 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3951 * one function. If you already have the data to write to the buffer, it 3952 * may be easier to simply call this function. 3953 * 3954 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3955 * and not the length of the event which would hold the header. 
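 *
 * A minimal usage sketch (illustrative only; the payload buffer and the
 * debug message are made up for this example and are not part of the API):
 *
 *	char buf[] = "hello";
 *
 *	if (ring_buffer_write(buffer, sizeof(buf), buf) < 0)
 *		pr_debug("ring buffer write failed or recording is off\n");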
3956 */ 3957 int ring_buffer_write(struct trace_buffer *buffer, 3958 unsigned long length, 3959 void *data) 3960 { 3961 struct ring_buffer_per_cpu *cpu_buffer; 3962 struct ring_buffer_event *event; 3963 void *body; 3964 int ret = -EBUSY; 3965 int cpu; 3966 3967 preempt_disable_notrace(); 3968 3969 if (atomic_read(&buffer->record_disabled)) 3970 goto out; 3971 3972 cpu = raw_smp_processor_id(); 3973 3974 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3975 goto out; 3976 3977 cpu_buffer = buffer->buffers[cpu]; 3978 3979 if (atomic_read(&cpu_buffer->record_disabled)) 3980 goto out; 3981 3982 if (length > BUF_MAX_DATA_SIZE) 3983 goto out; 3984 3985 if (unlikely(trace_recursive_lock(cpu_buffer))) 3986 goto out; 3987 3988 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3989 if (!event) 3990 goto out_unlock; 3991 3992 body = rb_event_data(event); 3993 3994 memcpy(body, data, length); 3995 3996 rb_commit(cpu_buffer); 3997 3998 rb_wakeups(buffer, cpu_buffer); 3999 4000 ret = 0; 4001 4002 out_unlock: 4003 trace_recursive_unlock(cpu_buffer); 4004 4005 out: 4006 preempt_enable_notrace(); 4007 4008 return ret; 4009 } 4010 EXPORT_SYMBOL_GPL(ring_buffer_write); 4011 4012 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 4013 { 4014 struct buffer_page *reader = cpu_buffer->reader_page; 4015 struct buffer_page *head = rb_set_head_page(cpu_buffer); 4016 struct buffer_page *commit = cpu_buffer->commit_page; 4017 4018 /* In case of error, head will be NULL */ 4019 if (unlikely(!head)) 4020 return true; 4021 4022 /* Reader should exhaust content in reader page */ 4023 if (reader->read != rb_page_commit(reader)) 4024 return false; 4025 4026 /* 4027 * If writers are committing on the reader page, knowing all 4028 * committed content has been read, the ring buffer is empty. 4029 */ 4030 if (commit == reader) 4031 return true; 4032 4033 /* 4034 * If writers are committing on a page other than reader page 4035 * and head page, there should always be content to read. 4036 */ 4037 if (commit != head) 4038 return false; 4039 4040 /* 4041 * Writers are committing on the head page, we just need 4042 * to care about there're committed data, and the reader will 4043 * swap reader page with head page when it is to read data. 4044 */ 4045 return rb_page_commit(commit) == 0; 4046 } 4047 4048 /** 4049 * ring_buffer_record_disable - stop all writes into the buffer 4050 * @buffer: The ring buffer to stop writes to. 4051 * 4052 * This prevents all writes to the buffer. Any attempt to write 4053 * to the buffer after this will fail and return NULL. 4054 * 4055 * The caller should call synchronize_rcu() after this. 4056 */ 4057 void ring_buffer_record_disable(struct trace_buffer *buffer) 4058 { 4059 atomic_inc(&buffer->record_disabled); 4060 } 4061 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 4062 4063 /** 4064 * ring_buffer_record_enable - enable writes to the buffer 4065 * @buffer: The ring buffer to enable writes 4066 * 4067 * Note, multiple disables will need the same number of enables 4068 * to truly enable the writing (much like preempt_disable). 4069 */ 4070 void ring_buffer_record_enable(struct trace_buffer *buffer) 4071 { 4072 atomic_dec(&buffer->record_disabled); 4073 } 4074 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 4075 4076 /** 4077 * ring_buffer_record_off - stop all writes into the buffer 4078 * @buffer: The ring buffer to stop writes to. 4079 * 4080 * This prevents all writes to the buffer. Any attempt to write 4081 * to the buffer after this will fail and return NULL. 
4082 * 4083 * This is different than ring_buffer_record_disable() as 4084 * it works like an on/off switch, where as the disable() version 4085 * must be paired with a enable(). 4086 */ 4087 void ring_buffer_record_off(struct trace_buffer *buffer) 4088 { 4089 unsigned int rd; 4090 unsigned int new_rd; 4091 4092 do { 4093 rd = atomic_read(&buffer->record_disabled); 4094 new_rd = rd | RB_BUFFER_OFF; 4095 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 4096 } 4097 EXPORT_SYMBOL_GPL(ring_buffer_record_off); 4098 4099 /** 4100 * ring_buffer_record_on - restart writes into the buffer 4101 * @buffer: The ring buffer to start writes to. 4102 * 4103 * This enables all writes to the buffer that was disabled by 4104 * ring_buffer_record_off(). 4105 * 4106 * This is different than ring_buffer_record_enable() as 4107 * it works like an on/off switch, where as the enable() version 4108 * must be paired with a disable(). 4109 */ 4110 void ring_buffer_record_on(struct trace_buffer *buffer) 4111 { 4112 unsigned int rd; 4113 unsigned int new_rd; 4114 4115 do { 4116 rd = atomic_read(&buffer->record_disabled); 4117 new_rd = rd & ~RB_BUFFER_OFF; 4118 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 4119 } 4120 EXPORT_SYMBOL_GPL(ring_buffer_record_on); 4121 4122 /** 4123 * ring_buffer_record_is_on - return true if the ring buffer can write 4124 * @buffer: The ring buffer to see if write is enabled 4125 * 4126 * Returns true if the ring buffer is in a state that it accepts writes. 4127 */ 4128 bool ring_buffer_record_is_on(struct trace_buffer *buffer) 4129 { 4130 return !atomic_read(&buffer->record_disabled); 4131 } 4132 4133 /** 4134 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 4135 * @buffer: The ring buffer to see if write is set enabled 4136 * 4137 * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 4138 * Note that this does NOT mean it is in a writable state. 4139 * 4140 * It may return true when the ring buffer has been disabled by 4141 * ring_buffer_record_disable(), as that is a temporary disabling of 4142 * the ring buffer. 4143 */ 4144 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) 4145 { 4146 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 4147 } 4148 4149 /** 4150 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 4151 * @buffer: The ring buffer to stop writes to. 4152 * @cpu: The CPU buffer to stop 4153 * 4154 * This prevents all writes to the buffer. Any attempt to write 4155 * to the buffer after this will fail and return NULL. 4156 * 4157 * The caller should call synchronize_rcu() after this. 4158 */ 4159 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) 4160 { 4161 struct ring_buffer_per_cpu *cpu_buffer; 4162 4163 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4164 return; 4165 4166 cpu_buffer = buffer->buffers[cpu]; 4167 atomic_inc(&cpu_buffer->record_disabled); 4168 } 4169 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 4170 4171 /** 4172 * ring_buffer_record_enable_cpu - enable writes to the buffer 4173 * @buffer: The ring buffer to enable writes 4174 * @cpu: The CPU to enable. 4175 * 4176 * Note, multiple disables will need the same number of enables 4177 * to truly enable the writing (much like preempt_disable). 
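 *
 * For example, a caller that wants to briefly stop writes to one CPU
 * buffer might do (sketch only; what happens while writes are stopped
 * is up to the caller):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_rcu();
 *	... read or copy out the cpu buffer ...
 *	ring_buffer_record_enable_cpu(buffer, cpu);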
4178 */ 4179 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4180 { 4181 struct ring_buffer_per_cpu *cpu_buffer; 4182 4183 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4184 return; 4185 4186 cpu_buffer = buffer->buffers[cpu]; 4187 atomic_dec(&cpu_buffer->record_disabled); 4188 } 4189 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4190 4191 /* 4192 * The total entries in the ring buffer is the running counter 4193 * of entries entered into the ring buffer, minus the sum of 4194 * the entries read from the ring buffer and the number of 4195 * entries that were overwritten. 4196 */ 4197 static inline unsigned long 4198 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4199 { 4200 return local_read(&cpu_buffer->entries) - 4201 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4202 } 4203 4204 /** 4205 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4206 * @buffer: The ring buffer 4207 * @cpu: The per CPU buffer to read from. 4208 */ 4209 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4210 { 4211 unsigned long flags; 4212 struct ring_buffer_per_cpu *cpu_buffer; 4213 struct buffer_page *bpage; 4214 u64 ret = 0; 4215 4216 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4217 return 0; 4218 4219 cpu_buffer = buffer->buffers[cpu]; 4220 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4221 /* 4222 * if the tail is on reader_page, oldest time stamp is on the reader 4223 * page 4224 */ 4225 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4226 bpage = cpu_buffer->reader_page; 4227 else 4228 bpage = rb_set_head_page(cpu_buffer); 4229 if (bpage) 4230 ret = bpage->page->time_stamp; 4231 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4232 4233 return ret; 4234 } 4235 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4236 4237 /** 4238 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4239 * @buffer: The ring buffer 4240 * @cpu: The per CPU buffer to read from. 4241 */ 4242 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4243 { 4244 struct ring_buffer_per_cpu *cpu_buffer; 4245 unsigned long ret; 4246 4247 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4248 return 0; 4249 4250 cpu_buffer = buffer->buffers[cpu]; 4251 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4252 4253 return ret; 4254 } 4255 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4256 4257 /** 4258 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4259 * @buffer: The ring buffer 4260 * @cpu: The per CPU buffer to get the entries from. 4261 */ 4262 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4263 { 4264 struct ring_buffer_per_cpu *cpu_buffer; 4265 4266 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4267 return 0; 4268 4269 cpu_buffer = buffer->buffers[cpu]; 4270 4271 return rb_num_of_entries(cpu_buffer); 4272 } 4273 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4274 4275 /** 4276 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4277 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
4278 * @buffer: The ring buffer 4279 * @cpu: The per CPU buffer to get the number of overruns from 4280 */ 4281 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4282 { 4283 struct ring_buffer_per_cpu *cpu_buffer; 4284 unsigned long ret; 4285 4286 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4287 return 0; 4288 4289 cpu_buffer = buffer->buffers[cpu]; 4290 ret = local_read(&cpu_buffer->overrun); 4291 4292 return ret; 4293 } 4294 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4295 4296 /** 4297 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4298 * commits failing due to the buffer wrapping around while there are uncommitted 4299 * events, such as during an interrupt storm. 4300 * @buffer: The ring buffer 4301 * @cpu: The per CPU buffer to get the number of overruns from 4302 */ 4303 unsigned long 4304 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4305 { 4306 struct ring_buffer_per_cpu *cpu_buffer; 4307 unsigned long ret; 4308 4309 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4310 return 0; 4311 4312 cpu_buffer = buffer->buffers[cpu]; 4313 ret = local_read(&cpu_buffer->commit_overrun); 4314 4315 return ret; 4316 } 4317 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4318 4319 /** 4320 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4321 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4322 * @buffer: The ring buffer 4323 * @cpu: The per CPU buffer to get the number of overruns from 4324 */ 4325 unsigned long 4326 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4327 { 4328 struct ring_buffer_per_cpu *cpu_buffer; 4329 unsigned long ret; 4330 4331 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4332 return 0; 4333 4334 cpu_buffer = buffer->buffers[cpu]; 4335 ret = local_read(&cpu_buffer->dropped_events); 4336 4337 return ret; 4338 } 4339 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4340 4341 /** 4342 * ring_buffer_read_events_cpu - get the number of events successfully read 4343 * @buffer: The ring buffer 4344 * @cpu: The per CPU buffer to get the number of events read 4345 */ 4346 unsigned long 4347 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4348 { 4349 struct ring_buffer_per_cpu *cpu_buffer; 4350 4351 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4352 return 0; 4353 4354 cpu_buffer = buffer->buffers[cpu]; 4355 return cpu_buffer->read; 4356 } 4357 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4358 4359 /** 4360 * ring_buffer_entries - get the number of entries in a buffer 4361 * @buffer: The ring buffer 4362 * 4363 * Returns the total number of entries in the ring buffer 4364 * (all CPU entries) 4365 */ 4366 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4367 { 4368 struct ring_buffer_per_cpu *cpu_buffer; 4369 unsigned long entries = 0; 4370 int cpu; 4371 4372 /* if you care about this being correct, lock the buffer */ 4373 for_each_buffer_cpu(buffer, cpu) { 4374 cpu_buffer = buffer->buffers[cpu]; 4375 entries += rb_num_of_entries(cpu_buffer); 4376 } 4377 4378 return entries; 4379 } 4380 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4381 4382 /** 4383 * ring_buffer_overruns - get the number of overruns in buffer 4384 * @buffer: The ring buffer 4385 * 4386 * Returns the total number of overruns in the ring buffer 4387 * (all CPU entries) 4388 */ 4389 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4390 { 4391 struct ring_buffer_per_cpu *cpu_buffer; 4392 unsigned long overruns = 0; 4393 int cpu; 4394 4395 /* 
if you care about this being correct, lock the buffer */ 4396 for_each_buffer_cpu(buffer, cpu) { 4397 cpu_buffer = buffer->buffers[cpu]; 4398 overruns += local_read(&cpu_buffer->overrun); 4399 } 4400 4401 return overruns; 4402 } 4403 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4404 4405 static void rb_iter_reset(struct ring_buffer_iter *iter) 4406 { 4407 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4408 4409 /* Iterator usage is expected to have record disabled */ 4410 iter->head_page = cpu_buffer->reader_page; 4411 iter->head = cpu_buffer->reader_page->read; 4412 iter->next_event = iter->head; 4413 4414 iter->cache_reader_page = iter->head_page; 4415 iter->cache_read = cpu_buffer->read; 4416 4417 if (iter->head) { 4418 iter->read_stamp = cpu_buffer->read_stamp; 4419 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4420 } else { 4421 iter->read_stamp = iter->head_page->page->time_stamp; 4422 iter->page_stamp = iter->read_stamp; 4423 } 4424 } 4425 4426 /** 4427 * ring_buffer_iter_reset - reset an iterator 4428 * @iter: The iterator to reset 4429 * 4430 * Resets the iterator, so that it will start from the beginning 4431 * again. 4432 */ 4433 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4434 { 4435 struct ring_buffer_per_cpu *cpu_buffer; 4436 unsigned long flags; 4437 4438 if (!iter) 4439 return; 4440 4441 cpu_buffer = iter->cpu_buffer; 4442 4443 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4444 rb_iter_reset(iter); 4445 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4446 } 4447 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4448 4449 /** 4450 * ring_buffer_iter_empty - check if an iterator has no more to read 4451 * @iter: The iterator to check 4452 */ 4453 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4454 { 4455 struct ring_buffer_per_cpu *cpu_buffer; 4456 struct buffer_page *reader; 4457 struct buffer_page *head_page; 4458 struct buffer_page *commit_page; 4459 struct buffer_page *curr_commit_page; 4460 unsigned commit; 4461 u64 curr_commit_ts; 4462 u64 commit_ts; 4463 4464 cpu_buffer = iter->cpu_buffer; 4465 reader = cpu_buffer->reader_page; 4466 head_page = cpu_buffer->head_page; 4467 commit_page = cpu_buffer->commit_page; 4468 commit_ts = commit_page->page->time_stamp; 4469 4470 /* 4471 * When the writer goes across pages, it issues a cmpxchg which 4472 * is a mb(), which will synchronize with the rmb here. 
4473 * (see rb_tail_page_update()) 4474 */ 4475 smp_rmb(); 4476 commit = rb_page_commit(commit_page); 4477 /* We want to make sure that the commit page doesn't change */ 4478 smp_rmb(); 4479 4480 /* Make sure commit page didn't change */ 4481 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4482 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4483 4484 /* If the commit page changed, then there's more data */ 4485 if (curr_commit_page != commit_page || 4486 curr_commit_ts != commit_ts) 4487 return 0; 4488 4489 /* Still racy, as it may return a false positive, but that's OK */ 4490 return ((iter->head_page == commit_page && iter->head >= commit) || 4491 (iter->head_page == reader && commit_page == head_page && 4492 head_page->read == commit && 4493 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4494 } 4495 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4496 4497 static void 4498 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4499 struct ring_buffer_event *event) 4500 { 4501 u64 delta; 4502 4503 switch (event->type_len) { 4504 case RINGBUF_TYPE_PADDING: 4505 return; 4506 4507 case RINGBUF_TYPE_TIME_EXTEND: 4508 delta = rb_event_time_stamp(event); 4509 cpu_buffer->read_stamp += delta; 4510 return; 4511 4512 case RINGBUF_TYPE_TIME_STAMP: 4513 delta = rb_event_time_stamp(event); 4514 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4515 cpu_buffer->read_stamp = delta; 4516 return; 4517 4518 case RINGBUF_TYPE_DATA: 4519 cpu_buffer->read_stamp += event->time_delta; 4520 return; 4521 4522 default: 4523 RB_WARN_ON(cpu_buffer, 1); 4524 } 4525 return; 4526 } 4527 4528 static void 4529 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4530 struct ring_buffer_event *event) 4531 { 4532 u64 delta; 4533 4534 switch (event->type_len) { 4535 case RINGBUF_TYPE_PADDING: 4536 return; 4537 4538 case RINGBUF_TYPE_TIME_EXTEND: 4539 delta = rb_event_time_stamp(event); 4540 iter->read_stamp += delta; 4541 return; 4542 4543 case RINGBUF_TYPE_TIME_STAMP: 4544 delta = rb_event_time_stamp(event); 4545 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4546 iter->read_stamp = delta; 4547 return; 4548 4549 case RINGBUF_TYPE_DATA: 4550 iter->read_stamp += event->time_delta; 4551 return; 4552 4553 default: 4554 RB_WARN_ON(iter->cpu_buffer, 1); 4555 } 4556 return; 4557 } 4558 4559 static struct buffer_page * 4560 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4561 { 4562 struct buffer_page *reader = NULL; 4563 unsigned long overwrite; 4564 unsigned long flags; 4565 int nr_loops = 0; 4566 int ret; 4567 4568 local_irq_save(flags); 4569 arch_spin_lock(&cpu_buffer->lock); 4570 4571 again: 4572 /* 4573 * This should normally only loop twice. But because the 4574 * start of the reader inserts an empty page, it causes 4575 * a case where we will loop three times. There should be no 4576 * reason to loop four times (that I know of). 
4577 */
4578 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4579 reader = NULL;
4580 goto out;
4581 }
4582
4583 reader = cpu_buffer->reader_page;
4584
4585 /* If there's more to read, return this page */
4586 if (cpu_buffer->reader_page->read < rb_page_size(reader))
4587 goto out;
4588
4589 /* Never should we have an index greater than the size */
4590 if (RB_WARN_ON(cpu_buffer,
4591 cpu_buffer->reader_page->read > rb_page_size(reader)))
4592 goto out;
4593
4594 /* check if we caught up to the tail */
4595 reader = NULL;
4596 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4597 goto out;
4598
4599 /* Don't bother swapping if the ring buffer is empty */
4600 if (rb_num_of_entries(cpu_buffer) == 0)
4601 goto out;
4602
4603 /*
4604 * Reset the reader page to size zero.
4605 */
4606 local_set(&cpu_buffer->reader_page->write, 0);
4607 local_set(&cpu_buffer->reader_page->entries, 0);
4608 local_set(&cpu_buffer->reader_page->page->commit, 0);
4609 cpu_buffer->reader_page->real_end = 0;
4610
4611 spin:
4612 /*
4613 * Splice the empty reader page into the list around the head.
4614 */
4615 reader = rb_set_head_page(cpu_buffer);
4616 if (!reader)
4617 goto out;
4618 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4619 cpu_buffer->reader_page->list.prev = reader->list.prev;
4620
4621 /*
4622 * cpu_buffer->pages just needs to point to the buffer, it
4623 * has no specific buffer page to point to. Let's move it out
4624 * of our way so we don't accidentally swap it.
4625 */
4626 cpu_buffer->pages = reader->list.prev;
4627
4628 /* The reader page will be pointing to the new head */
4629 rb_set_list_to_head(&cpu_buffer->reader_page->list);
4630
4631 /*
4632 * We want to make sure we read the overruns after we set up our
4633 * pointers to the next object. The writer side does a
4634 * cmpxchg to cross pages which acts as the mb on the writer
4635 * side. Note, the reader will constantly fail the swap
4636 * while the writer is updating the pointers, so this
4637 * guarantees that the overwrite recorded here is the one we
4638 * want to compare with the last_overrun.
4639 */
4640 smp_mb();
4641 overwrite = local_read(&(cpu_buffer->overrun));
4642
4643 /*
4644 * Here's the tricky part.
4645 *
4646 * We need to move the pointer past the header page.
4647 * But we can only do that if a writer is not currently
4648 * moving it. The page before the header page has the
4649 * flag bit '1' set if it is pointing to the page we want,
4650 * but if the writer is in the process of moving it
4651 * then it will be '2' or already moved '0'.
4652 */
4653
4654 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4655
4656 /*
4657 * If we did not convert it, then we must try again.
4658 */
4659 if (!ret)
4660 goto spin;
4661
4662 /*
4663 * Yay! We succeeded in replacing the page.
4664 *
4665 * Now make the new head point back to the reader page.
4666 */ 4667 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4668 rb_inc_page(&cpu_buffer->head_page); 4669 4670 local_inc(&cpu_buffer->pages_read); 4671 4672 /* Finally update the reader page to the new head */ 4673 cpu_buffer->reader_page = reader; 4674 cpu_buffer->reader_page->read = 0; 4675 4676 if (overwrite != cpu_buffer->last_overrun) { 4677 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4678 cpu_buffer->last_overrun = overwrite; 4679 } 4680 4681 goto again; 4682 4683 out: 4684 /* Update the read_stamp on the first event */ 4685 if (reader && reader->read == 0) 4686 cpu_buffer->read_stamp = reader->page->time_stamp; 4687 4688 arch_spin_unlock(&cpu_buffer->lock); 4689 local_irq_restore(flags); 4690 4691 /* 4692 * The writer has preempt disable, wait for it. But not forever 4693 * Although, 1 second is pretty much "forever" 4694 */ 4695 #define USECS_WAIT 1000000 4696 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4697 /* If the write is past the end of page, a writer is still updating it */ 4698 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4699 break; 4700 4701 udelay(1); 4702 4703 /* Get the latest version of the reader write value */ 4704 smp_rmb(); 4705 } 4706 4707 /* The writer is not moving forward? Something is wrong */ 4708 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4709 reader = NULL; 4710 4711 /* 4712 * Make sure we see any padding after the write update 4713 * (see rb_reset_tail()) 4714 */ 4715 smp_rmb(); 4716 4717 4718 return reader; 4719 } 4720 4721 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4722 { 4723 struct ring_buffer_event *event; 4724 struct buffer_page *reader; 4725 unsigned length; 4726 4727 reader = rb_get_reader_page(cpu_buffer); 4728 4729 /* This function should not be called when buffer is empty */ 4730 if (RB_WARN_ON(cpu_buffer, !reader)) 4731 return; 4732 4733 event = rb_reader_event(cpu_buffer); 4734 4735 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4736 cpu_buffer->read++; 4737 4738 rb_update_read_stamp(cpu_buffer, event); 4739 4740 length = rb_event_length(event); 4741 cpu_buffer->reader_page->read += length; 4742 } 4743 4744 static void rb_advance_iter(struct ring_buffer_iter *iter) 4745 { 4746 struct ring_buffer_per_cpu *cpu_buffer; 4747 4748 cpu_buffer = iter->cpu_buffer; 4749 4750 /* If head == next_event then we need to jump to the next event */ 4751 if (iter->head == iter->next_event) { 4752 /* If the event gets overwritten again, there's nothing to do */ 4753 if (rb_iter_head_event(iter) == NULL) 4754 return; 4755 } 4756 4757 iter->head = iter->next_event; 4758 4759 /* 4760 * Check if we are at the end of the buffer. 4761 */ 4762 if (iter->next_event >= rb_page_size(iter->head_page)) { 4763 /* discarded commits can make the page empty */ 4764 if (iter->head_page == cpu_buffer->commit_page) 4765 return; 4766 rb_inc_iter(iter); 4767 return; 4768 } 4769 4770 rb_update_iter_read_stamp(iter, iter->event); 4771 } 4772 4773 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4774 { 4775 return cpu_buffer->lost_events; 4776 } 4777 4778 static struct ring_buffer_event * 4779 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4780 unsigned long *lost_events) 4781 { 4782 struct ring_buffer_event *event; 4783 struct buffer_page *reader; 4784 int nr_loops = 0; 4785 4786 if (ts) 4787 *ts = 0; 4788 again: 4789 /* 4790 * We repeat when a time extend is encountered. 
4791 * Since the time extend is always attached to a data event, 4792 * we should never loop more than once. 4793 * (We never hit the following condition more than twice). 4794 */ 4795 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4796 return NULL; 4797 4798 reader = rb_get_reader_page(cpu_buffer); 4799 if (!reader) 4800 return NULL; 4801 4802 event = rb_reader_event(cpu_buffer); 4803 4804 switch (event->type_len) { 4805 case RINGBUF_TYPE_PADDING: 4806 if (rb_null_event(event)) 4807 RB_WARN_ON(cpu_buffer, 1); 4808 /* 4809 * Because the writer could be discarding every 4810 * event it creates (which would probably be bad) 4811 * if we were to go back to "again" then we may never 4812 * catch up, and will trigger the warn on, or lock 4813 * the box. Return the padding, and we will release 4814 * the current locks, and try again. 4815 */ 4816 return event; 4817 4818 case RINGBUF_TYPE_TIME_EXTEND: 4819 /* Internal data, OK to advance */ 4820 rb_advance_reader(cpu_buffer); 4821 goto again; 4822 4823 case RINGBUF_TYPE_TIME_STAMP: 4824 if (ts) { 4825 *ts = rb_event_time_stamp(event); 4826 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4827 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4828 cpu_buffer->cpu, ts); 4829 } 4830 /* Internal data, OK to advance */ 4831 rb_advance_reader(cpu_buffer); 4832 goto again; 4833 4834 case RINGBUF_TYPE_DATA: 4835 if (ts && !(*ts)) { 4836 *ts = cpu_buffer->read_stamp + event->time_delta; 4837 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4838 cpu_buffer->cpu, ts); 4839 } 4840 if (lost_events) 4841 *lost_events = rb_lost_events(cpu_buffer); 4842 return event; 4843 4844 default: 4845 RB_WARN_ON(cpu_buffer, 1); 4846 } 4847 4848 return NULL; 4849 } 4850 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4851 4852 static struct ring_buffer_event * 4853 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4854 { 4855 struct trace_buffer *buffer; 4856 struct ring_buffer_per_cpu *cpu_buffer; 4857 struct ring_buffer_event *event; 4858 int nr_loops = 0; 4859 4860 if (ts) 4861 *ts = 0; 4862 4863 cpu_buffer = iter->cpu_buffer; 4864 buffer = cpu_buffer->buffer; 4865 4866 /* 4867 * Check if someone performed a consuming read to 4868 * the buffer. A consuming read invalidates the iterator 4869 * and we need to reset the iterator in this case. 4870 */ 4871 if (unlikely(iter->cache_read != cpu_buffer->read || 4872 iter->cache_reader_page != cpu_buffer->reader_page)) 4873 rb_iter_reset(iter); 4874 4875 again: 4876 if (ring_buffer_iter_empty(iter)) 4877 return NULL; 4878 4879 /* 4880 * As the writer can mess with what the iterator is trying 4881 * to read, just give up if we fail to get an event after 4882 * three tries. The iterator is not as reliable when reading 4883 * the ring buffer with an active write as the consumer is. 4884 * Do not warn if the three failures is reached. 
4885 */
4886 if (++nr_loops > 3)
4887 return NULL;
4888
4889 if (rb_per_cpu_empty(cpu_buffer))
4890 return NULL;
4891
4892 if (iter->head >= rb_page_size(iter->head_page)) {
4893 rb_inc_iter(iter);
4894 goto again;
4895 }
4896
4897 event = rb_iter_head_event(iter);
4898 if (!event)
4899 goto again;
4900
4901 switch (event->type_len) {
4902 case RINGBUF_TYPE_PADDING:
4903 if (rb_null_event(event)) {
4904 rb_inc_iter(iter);
4905 goto again;
4906 }
4907 rb_advance_iter(iter);
4908 return event;
4909
4910 case RINGBUF_TYPE_TIME_EXTEND:
4911 /* Internal data, OK to advance */
4912 rb_advance_iter(iter);
4913 goto again;
4914
4915 case RINGBUF_TYPE_TIME_STAMP:
4916 if (ts) {
4917 *ts = rb_event_time_stamp(event);
4918 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4919 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4920 cpu_buffer->cpu, ts);
4921 }
4922 /* Internal data, OK to advance */
4923 rb_advance_iter(iter);
4924 goto again;
4925
4926 case RINGBUF_TYPE_DATA:
4927 if (ts && !(*ts)) {
4928 *ts = iter->read_stamp + event->time_delta;
4929 ring_buffer_normalize_time_stamp(buffer,
4930 cpu_buffer->cpu, ts);
4931 }
4932 return event;
4933
4934 default:
4935 RB_WARN_ON(cpu_buffer, 1);
4936 }
4937
4938 return NULL;
4939 }
4940 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4941
4942 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4943 {
4944 if (likely(!in_nmi())) {
4945 raw_spin_lock(&cpu_buffer->reader_lock);
4946 return true;
4947 }
4948
4949 /*
4950 * If an NMI die dumps out the content of the ring buffer
4951 * trylock must be used to prevent a deadlock if the NMI
4952 * preempted a task that holds the ring buffer locks. If
4953 * we get the lock then all is fine, if not, then continue
4954 * to do the read, but this can corrupt the ring buffer,
4955 * so it must be permanently disabled from future writes.
4956 * Reading from NMI is a one-shot deal.
4957 */
4958 if (raw_spin_trylock(&cpu_buffer->reader_lock))
4959 return true;
4960
4961 /* Continue without locking, but disable the ring buffer */
4962 atomic_inc(&cpu_buffer->record_disabled);
4963 return false;
4964 }
4965
4966 static inline void
4967 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4968 {
4969 if (likely(locked))
4970 raw_spin_unlock(&cpu_buffer->reader_lock);
4971 return;
4972 }
4973
4974 /**
4975 * ring_buffer_peek - peek at the next event to be read
4976 * @buffer: The ring buffer to read
4977 * @cpu: The cpu to peek at
4978 * @ts: The timestamp counter of this event.
4979 * @lost_events: a variable to store if events were lost (may be NULL)
4980 *
4981 * This will return the event that will be read next, but does
4982 * not consume the data.
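 *
 * A simple polling sketch (illustrative only; my_handle_event() is a
 * placeholder for whatever the caller does with the data):
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		my_handle_event(ring_buffer_event_data(event), ts);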
4983 */
4984 struct ring_buffer_event *
4985 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4986 unsigned long *lost_events)
4987 {
4988 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4989 struct ring_buffer_event *event;
4990 unsigned long flags;
4991 bool dolock;
4992
4993 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4994 return NULL;
4995
4996 again:
4997 local_irq_save(flags);
4998 dolock = rb_reader_lock(cpu_buffer);
4999 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5000 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5001 rb_advance_reader(cpu_buffer);
5002 rb_reader_unlock(cpu_buffer, dolock);
5003 local_irq_restore(flags);
5004
5005 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5006 goto again;
5007
5008 return event;
5009 }
5010
5011 /** ring_buffer_iter_dropped - report if there are dropped events
5012 * @iter: The ring buffer iterator
5013 *
5014 * Returns true if there were dropped events since the last peek.
5015 */
5016 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5017 {
5018 bool ret = iter->missed_events != 0;
5019
5020 iter->missed_events = 0;
5021 return ret;
5022 }
5023 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5024
5025 /**
5026 * ring_buffer_iter_peek - peek at the next event to be read
5027 * @iter: The ring buffer iterator
5028 * @ts: The timestamp counter of this event.
5029 *
5030 * This will return the event that will be read next, but does
5031 * not increment the iterator.
5032 */
5033 struct ring_buffer_event *
5034 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5035 {
5036 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5037 struct ring_buffer_event *event;
5038 unsigned long flags;
5039
5040 again:
5041 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5042 event = rb_iter_peek(iter, ts);
5043 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5044
5045 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5046 goto again;
5047
5048 return event;
5049 }
5050
5051 /**
5052 * ring_buffer_consume - return an event and consume it
5053 * @buffer: The ring buffer to get the next event from
5054 * @cpu: the cpu to read the buffer from
5055 * @ts: a variable to store the timestamp (may be NULL)
5056 * @lost_events: a variable to store if events were lost (may be NULL)
5057 *
5058 * Returns the next event in the ring buffer, and that event is consumed.
5059 * Meaning that sequential reads will keep returning a different event,
5060 * and eventually empty the ring buffer if the producer is slower.
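 *
 * A typical drain loop might look like this (sketch only; process() is a
 * placeholder for the caller's handling of each event):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event), ts, lost);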
5061 */ 5062 struct ring_buffer_event * 5063 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 5064 unsigned long *lost_events) 5065 { 5066 struct ring_buffer_per_cpu *cpu_buffer; 5067 struct ring_buffer_event *event = NULL; 5068 unsigned long flags; 5069 bool dolock; 5070 5071 again: 5072 /* might be called in atomic */ 5073 preempt_disable(); 5074 5075 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5076 goto out; 5077 5078 cpu_buffer = buffer->buffers[cpu]; 5079 local_irq_save(flags); 5080 dolock = rb_reader_lock(cpu_buffer); 5081 5082 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 5083 if (event) { 5084 cpu_buffer->lost_events = 0; 5085 rb_advance_reader(cpu_buffer); 5086 } 5087 5088 rb_reader_unlock(cpu_buffer, dolock); 5089 local_irq_restore(flags); 5090 5091 out: 5092 preempt_enable(); 5093 5094 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5095 goto again; 5096 5097 return event; 5098 } 5099 EXPORT_SYMBOL_GPL(ring_buffer_consume); 5100 5101 /** 5102 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 5103 * @buffer: The ring buffer to read from 5104 * @cpu: The cpu buffer to iterate over 5105 * @flags: gfp flags to use for memory allocation 5106 * 5107 * This performs the initial preparations necessary to iterate 5108 * through the buffer. Memory is allocated, buffer recording 5109 * is disabled, and the iterator pointer is returned to the caller. 5110 * 5111 * Disabling buffer recording prevents the reading from being 5112 * corrupted. This is not a consuming read, so a producer is not 5113 * expected. 5114 * 5115 * After a sequence of ring_buffer_read_prepare calls, the user is 5116 * expected to make at least one call to ring_buffer_read_prepare_sync. 5117 * Afterwards, ring_buffer_read_start is invoked to get things going 5118 * for real. 5119 * 5120 * This overall must be paired with ring_buffer_read_finish. 5121 */ 5122 struct ring_buffer_iter * 5123 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 5124 { 5125 struct ring_buffer_per_cpu *cpu_buffer; 5126 struct ring_buffer_iter *iter; 5127 5128 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5129 return NULL; 5130 5131 iter = kzalloc(sizeof(*iter), flags); 5132 if (!iter) 5133 return NULL; 5134 5135 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); 5136 if (!iter->event) { 5137 kfree(iter); 5138 return NULL; 5139 } 5140 5141 cpu_buffer = buffer->buffers[cpu]; 5142 5143 iter->cpu_buffer = cpu_buffer; 5144 5145 atomic_inc(&cpu_buffer->resize_disabled); 5146 5147 return iter; 5148 } 5149 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 5150 5151 /** 5152 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 5153 * 5154 * All previously invoked ring_buffer_read_prepare calls to prepare 5155 * iterators will be synchronized. Afterwards, read_buffer_read_start 5156 * calls on those iterators are allowed. 5157 */ 5158 void 5159 ring_buffer_read_prepare_sync(void) 5160 { 5161 synchronize_rcu(); 5162 } 5163 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 5164 5165 /** 5166 * ring_buffer_read_start - start a non consuming read of the buffer 5167 * @iter: The iterator returned by ring_buffer_read_prepare 5168 * 5169 * This finalizes the startup of an iteration through the buffer. 5170 * The iterator comes from a call to ring_buffer_read_prepare and 5171 * an intervening ring_buffer_read_prepare_sync must have been 5172 * performed. 5173 * 5174 * Must be paired with ring_buffer_read_finish. 
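 *
 * Putting the iterator calls together (sketch only, a single cpu and no
 * error handling shown):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);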
5175 */ 5176 void 5177 ring_buffer_read_start(struct ring_buffer_iter *iter) 5178 { 5179 struct ring_buffer_per_cpu *cpu_buffer; 5180 unsigned long flags; 5181 5182 if (!iter) 5183 return; 5184 5185 cpu_buffer = iter->cpu_buffer; 5186 5187 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5188 arch_spin_lock(&cpu_buffer->lock); 5189 rb_iter_reset(iter); 5190 arch_spin_unlock(&cpu_buffer->lock); 5191 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5192 } 5193 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 5194 5195 /** 5196 * ring_buffer_read_finish - finish reading the iterator of the buffer 5197 * @iter: The iterator retrieved by ring_buffer_start 5198 * 5199 * This re-enables the recording to the buffer, and frees the 5200 * iterator. 5201 */ 5202 void 5203 ring_buffer_read_finish(struct ring_buffer_iter *iter) 5204 { 5205 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5206 unsigned long flags; 5207 5208 /* 5209 * Ring buffer is disabled from recording, here's a good place 5210 * to check the integrity of the ring buffer. 5211 * Must prevent readers from trying to read, as the check 5212 * clears the HEAD page and readers require it. 5213 */ 5214 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5215 rb_check_pages(cpu_buffer); 5216 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5217 5218 atomic_dec(&cpu_buffer->resize_disabled); 5219 kfree(iter->event); 5220 kfree(iter); 5221 } 5222 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 5223 5224 /** 5225 * ring_buffer_iter_advance - advance the iterator to the next location 5226 * @iter: The ring buffer iterator 5227 * 5228 * Move the location of the iterator such that the next read will 5229 * be the next location of the iterator. 5230 */ 5231 void ring_buffer_iter_advance(struct ring_buffer_iter *iter) 5232 { 5233 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5234 unsigned long flags; 5235 5236 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5237 5238 rb_advance_iter(iter); 5239 5240 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5241 } 5242 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance); 5243 5244 /** 5245 * ring_buffer_size - return the size of the ring buffer (in bytes) 5246 * @buffer: The ring buffer. 5247 * @cpu: The CPU to get ring buffer size from. 5248 */ 5249 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) 5250 { 5251 /* 5252 * Earlier, this method returned 5253 * BUF_PAGE_SIZE * buffer->nr_pages 5254 * Since the nr_pages field is now removed, we have converted this to 5255 * return the per cpu buffer value. 
5256 */ 5257 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5258 return 0; 5259 5260 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5261 } 5262 EXPORT_SYMBOL_GPL(ring_buffer_size); 5263 5264 static void 5265 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5266 { 5267 rb_head_page_deactivate(cpu_buffer); 5268 5269 cpu_buffer->head_page 5270 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5271 local_set(&cpu_buffer->head_page->write, 0); 5272 local_set(&cpu_buffer->head_page->entries, 0); 5273 local_set(&cpu_buffer->head_page->page->commit, 0); 5274 5275 cpu_buffer->head_page->read = 0; 5276 5277 cpu_buffer->tail_page = cpu_buffer->head_page; 5278 cpu_buffer->commit_page = cpu_buffer->head_page; 5279 5280 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5281 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5282 local_set(&cpu_buffer->reader_page->write, 0); 5283 local_set(&cpu_buffer->reader_page->entries, 0); 5284 local_set(&cpu_buffer->reader_page->page->commit, 0); 5285 cpu_buffer->reader_page->read = 0; 5286 5287 local_set(&cpu_buffer->entries_bytes, 0); 5288 local_set(&cpu_buffer->overrun, 0); 5289 local_set(&cpu_buffer->commit_overrun, 0); 5290 local_set(&cpu_buffer->dropped_events, 0); 5291 local_set(&cpu_buffer->entries, 0); 5292 local_set(&cpu_buffer->committing, 0); 5293 local_set(&cpu_buffer->commits, 0); 5294 local_set(&cpu_buffer->pages_touched, 0); 5295 local_set(&cpu_buffer->pages_lost, 0); 5296 local_set(&cpu_buffer->pages_read, 0); 5297 cpu_buffer->last_pages_touch = 0; 5298 cpu_buffer->shortest_full = 0; 5299 cpu_buffer->read = 0; 5300 cpu_buffer->read_bytes = 0; 5301 5302 rb_time_set(&cpu_buffer->write_stamp, 0); 5303 rb_time_set(&cpu_buffer->before_stamp, 0); 5304 5305 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5306 5307 cpu_buffer->lost_events = 0; 5308 cpu_buffer->last_overrun = 0; 5309 5310 rb_head_page_activate(cpu_buffer); 5311 } 5312 5313 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5314 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5315 { 5316 unsigned long flags; 5317 5318 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5319 5320 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5321 goto out; 5322 5323 arch_spin_lock(&cpu_buffer->lock); 5324 5325 rb_reset_cpu(cpu_buffer); 5326 5327 arch_spin_unlock(&cpu_buffer->lock); 5328 5329 out: 5330 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5331 } 5332 5333 /** 5334 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5335 * @buffer: The ring buffer to reset a per cpu buffer of 5336 * @cpu: The CPU buffer to be reset 5337 */ 5338 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5339 { 5340 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5341 5342 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5343 return; 5344 5345 /* prevent another thread from changing buffer sizes */ 5346 mutex_lock(&buffer->mutex); 5347 5348 atomic_inc(&cpu_buffer->resize_disabled); 5349 atomic_inc(&cpu_buffer->record_disabled); 5350 5351 /* Make sure all commits have finished */ 5352 synchronize_rcu(); 5353 5354 reset_disabled_cpu_buffer(cpu_buffer); 5355 5356 atomic_dec(&cpu_buffer->record_disabled); 5357 atomic_dec(&cpu_buffer->resize_disabled); 5358 5359 mutex_unlock(&buffer->mutex); 5360 } 5361 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5362 5363 /** 5364 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5365 * @buffer: The ring buffer to reset a per cpu buffer 
of
5367 */
5368 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5369 {
5370 struct ring_buffer_per_cpu *cpu_buffer;
5371 int cpu;
5372
5373 /* prevent another thread from changing buffer sizes */
5374 mutex_lock(&buffer->mutex);
5375
5376 for_each_online_buffer_cpu(buffer, cpu) {
5377 cpu_buffer = buffer->buffers[cpu];
5378
5379 atomic_inc(&cpu_buffer->resize_disabled);
5380 atomic_inc(&cpu_buffer->record_disabled);
5381 }
5382
5383 /* Make sure all commits have finished */
5384 synchronize_rcu();
5385
5386 for_each_online_buffer_cpu(buffer, cpu) {
5387 cpu_buffer = buffer->buffers[cpu];
5388
5389 reset_disabled_cpu_buffer(cpu_buffer);
5390
5391 atomic_dec(&cpu_buffer->record_disabled);
5392 atomic_dec(&cpu_buffer->resize_disabled);
5393 }
5394
5395 mutex_unlock(&buffer->mutex);
5396 }
5397
5398 /**
5399 * ring_buffer_reset - reset a ring buffer
5400 * @buffer: The ring buffer to reset all cpu buffers
5401 */
5402 void ring_buffer_reset(struct trace_buffer *buffer)
5403 {
5404 struct ring_buffer_per_cpu *cpu_buffer;
5405 int cpu;
5406
5407 /* prevent another thread from changing buffer sizes */
5408 mutex_lock(&buffer->mutex);
5409
5410 for_each_buffer_cpu(buffer, cpu) {
5411 cpu_buffer = buffer->buffers[cpu];
5412
5413 atomic_inc(&cpu_buffer->resize_disabled);
5414 atomic_inc(&cpu_buffer->record_disabled);
5415 }
5416
5417 /* Make sure all commits have finished */
5418 synchronize_rcu();
5419
5420 for_each_buffer_cpu(buffer, cpu) {
5421 cpu_buffer = buffer->buffers[cpu];
5422
5423 reset_disabled_cpu_buffer(cpu_buffer);
5424
5425 atomic_dec(&cpu_buffer->record_disabled);
5426 atomic_dec(&cpu_buffer->resize_disabled);
5427 }
5428
5429 mutex_unlock(&buffer->mutex);
5430 }
5431 EXPORT_SYMBOL_GPL(ring_buffer_reset);
5432
5433 /**
5434 * ring_buffer_empty - is the ring buffer empty?
5435 * @buffer: The ring buffer to test
5436 */
5437 bool ring_buffer_empty(struct trace_buffer *buffer)
5438 {
5439 struct ring_buffer_per_cpu *cpu_buffer;
5440 unsigned long flags;
5441 bool dolock;
5442 int cpu;
5443 int ret;
5444
5445 /* yes this is racy, but if you don't like the race, lock the buffer */
5446 for_each_buffer_cpu(buffer, cpu) {
5447 cpu_buffer = buffer->buffers[cpu];
5448 local_irq_save(flags);
5449 dolock = rb_reader_lock(cpu_buffer);
5450 ret = rb_per_cpu_empty(cpu_buffer);
5451 rb_reader_unlock(cpu_buffer, dolock);
5452 local_irq_restore(flags);
5453
5454 if (!ret)
5455 return false;
5456 }
5457
5458 return true;
5459 }
5460 EXPORT_SYMBOL_GPL(ring_buffer_empty);
5461
5462 /**
5463 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5464 * @buffer: The ring buffer 5465 * @cpu: The CPU buffer to test 5466 */ 5467 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5468 { 5469 struct ring_buffer_per_cpu *cpu_buffer; 5470 unsigned long flags; 5471 bool dolock; 5472 int ret; 5473 5474 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5475 return true; 5476 5477 cpu_buffer = buffer->buffers[cpu]; 5478 local_irq_save(flags); 5479 dolock = rb_reader_lock(cpu_buffer); 5480 ret = rb_per_cpu_empty(cpu_buffer); 5481 rb_reader_unlock(cpu_buffer, dolock); 5482 local_irq_restore(flags); 5483 5484 return ret; 5485 } 5486 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5487 5488 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5489 /** 5490 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5491 * @buffer_a: One buffer to swap with 5492 * @buffer_b: The other buffer to swap with 5493 * @cpu: the CPU of the buffers to swap 5494 * 5495 * This function is useful for tracers that want to take a "snapshot" 5496 * of a CPU buffer and has another back up buffer lying around. 5497 * it is expected that the tracer handles the cpu buffer not being 5498 * used at the moment. 5499 */ 5500 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5501 struct trace_buffer *buffer_b, int cpu) 5502 { 5503 struct ring_buffer_per_cpu *cpu_buffer_a; 5504 struct ring_buffer_per_cpu *cpu_buffer_b; 5505 int ret = -EINVAL; 5506 5507 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5508 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5509 goto out; 5510 5511 cpu_buffer_a = buffer_a->buffers[cpu]; 5512 cpu_buffer_b = buffer_b->buffers[cpu]; 5513 5514 /* At least make sure the two buffers are somewhat the same */ 5515 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5516 goto out; 5517 5518 ret = -EAGAIN; 5519 5520 if (atomic_read(&buffer_a->record_disabled)) 5521 goto out; 5522 5523 if (atomic_read(&buffer_b->record_disabled)) 5524 goto out; 5525 5526 if (atomic_read(&cpu_buffer_a->record_disabled)) 5527 goto out; 5528 5529 if (atomic_read(&cpu_buffer_b->record_disabled)) 5530 goto out; 5531 5532 /* 5533 * We can't do a synchronize_rcu here because this 5534 * function can be called in atomic context. 5535 * Normally this will be called from the same CPU as cpu. 5536 * If not it's up to the caller to protect this. 5537 */ 5538 atomic_inc(&cpu_buffer_a->record_disabled); 5539 atomic_inc(&cpu_buffer_b->record_disabled); 5540 5541 ret = -EBUSY; 5542 if (local_read(&cpu_buffer_a->committing)) 5543 goto out_dec; 5544 if (local_read(&cpu_buffer_b->committing)) 5545 goto out_dec; 5546 5547 buffer_a->buffers[cpu] = cpu_buffer_b; 5548 buffer_b->buffers[cpu] = cpu_buffer_a; 5549 5550 cpu_buffer_b->buffer = buffer_a; 5551 cpu_buffer_a->buffer = buffer_b; 5552 5553 ret = 0; 5554 5555 out_dec: 5556 atomic_dec(&cpu_buffer_a->record_disabled); 5557 atomic_dec(&cpu_buffer_b->record_disabled); 5558 out: 5559 return ret; 5560 } 5561 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5562 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5563 5564 /** 5565 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5566 * @buffer: the buffer to allocate for. 5567 * @cpu: the cpu buffer to allocate. 5568 * 5569 * This function is used in conjunction with ring_buffer_read_page. 5570 * When reading a full page from the ring buffer, these functions 5571 * can be used to speed up the process. The calling function should 5572 * allocate a few pages first with this function. 
Then when it 5573 * needs to get pages from the ring buffer, it passes the result 5574 * of this function into ring_buffer_read_page, which will swap 5575 * the page that was allocated, with the read page of the buffer. 5576 * 5577 * Returns: 5578 * The page allocated, or ERR_PTR 5579 */ 5580 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 5581 { 5582 struct ring_buffer_per_cpu *cpu_buffer; 5583 struct buffer_data_page *bpage = NULL; 5584 unsigned long flags; 5585 struct page *page; 5586 5587 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5588 return ERR_PTR(-ENODEV); 5589 5590 cpu_buffer = buffer->buffers[cpu]; 5591 local_irq_save(flags); 5592 arch_spin_lock(&cpu_buffer->lock); 5593 5594 if (cpu_buffer->free_page) { 5595 bpage = cpu_buffer->free_page; 5596 cpu_buffer->free_page = NULL; 5597 } 5598 5599 arch_spin_unlock(&cpu_buffer->lock); 5600 local_irq_restore(flags); 5601 5602 if (bpage) 5603 goto out; 5604 5605 page = alloc_pages_node(cpu_to_node(cpu), 5606 GFP_KERNEL | __GFP_NORETRY, 0); 5607 if (!page) 5608 return ERR_PTR(-ENOMEM); 5609 5610 bpage = page_address(page); 5611 5612 out: 5613 rb_init_page(bpage); 5614 5615 return bpage; 5616 } 5617 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 5618 5619 /** 5620 * ring_buffer_free_read_page - free an allocated read page 5621 * @buffer: the buffer the page was allocate for 5622 * @cpu: the cpu buffer the page came from 5623 * @data: the page to free 5624 * 5625 * Free a page allocated from ring_buffer_alloc_read_page. 5626 */ 5627 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) 5628 { 5629 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5630 struct buffer_data_page *bpage = data; 5631 struct page *page = virt_to_page(bpage); 5632 unsigned long flags; 5633 5634 /* If the page is still in use someplace else, we can't reuse it */ 5635 if (page_ref_count(page) > 1) 5636 goto out; 5637 5638 local_irq_save(flags); 5639 arch_spin_lock(&cpu_buffer->lock); 5640 5641 if (!cpu_buffer->free_page) { 5642 cpu_buffer->free_page = bpage; 5643 bpage = NULL; 5644 } 5645 5646 arch_spin_unlock(&cpu_buffer->lock); 5647 local_irq_restore(flags); 5648 5649 out: 5650 free_page((unsigned long)bpage); 5651 } 5652 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 5653 5654 /** 5655 * ring_buffer_read_page - extract a page from the ring buffer 5656 * @buffer: buffer to extract from 5657 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 5658 * @len: amount to extract 5659 * @cpu: the cpu of the buffer to extract 5660 * @full: should the extraction only happen when the page is full. 5661 * 5662 * This function will pull out a page from the ring buffer and consume it. 5663 * @data_page must be the address of the variable that was returned 5664 * from ring_buffer_alloc_read_page. This is because the page might be used 5665 * to swap with a page in the ring buffer. 5666 * 5667 * for example: 5668 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5669 * if (IS_ERR(rpage)) 5670 * return PTR_ERR(rpage); 5671 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5672 * if (ret >= 0) 5673 * process_page(rpage, ret); 5674 * 5675 * When @full is set, the function will not return true unless 5676 * the writer is off the reader page. 5677 * 5678 * Note: it is up to the calling functions to handle sleeps and wakeups. 5679 * The ring buffer can be used anywhere in the kernel and can not 5680 * blindly call wake_up. 
The layer that uses the ring buffer must be 5681 * responsible for that. 5682 * 5683 * Returns: 5684 * >=0 if data has been transferred, returns the offset of consumed data. 5685 * <0 if no data has been transferred. 5686 */ 5687 int ring_buffer_read_page(struct trace_buffer *buffer, 5688 void **data_page, size_t len, int cpu, int full) 5689 { 5690 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5691 struct ring_buffer_event *event; 5692 struct buffer_data_page *bpage; 5693 struct buffer_page *reader; 5694 unsigned long missed_events; 5695 unsigned long flags; 5696 unsigned int commit; 5697 unsigned int read; 5698 u64 save_timestamp; 5699 int ret = -1; 5700 5701 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5702 goto out; 5703 5704 /* 5705 * If len is not big enough to hold the page header, then 5706 * we can not copy anything. 5707 */ 5708 if (len <= BUF_PAGE_HDR_SIZE) 5709 goto out; 5710 5711 len -= BUF_PAGE_HDR_SIZE; 5712 5713 if (!data_page) 5714 goto out; 5715 5716 bpage = *data_page; 5717 if (!bpage) 5718 goto out; 5719 5720 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5721 5722 reader = rb_get_reader_page(cpu_buffer); 5723 if (!reader) 5724 goto out_unlock; 5725 5726 event = rb_reader_event(cpu_buffer); 5727 5728 read = reader->read; 5729 commit = rb_page_commit(reader); 5730 5731 /* Check if any events were dropped */ 5732 missed_events = cpu_buffer->lost_events; 5733 5734 /* 5735 * If this page has been partially read or 5736 * if len is not big enough to read the rest of the page or 5737 * a writer is still on the page, then 5738 * we must copy the data from the page to the buffer. 5739 * Otherwise, we can simply swap the page with the one passed in. 5740 */ 5741 if (read || (len < (commit - read)) || 5742 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5743 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5744 unsigned int rpos = read; 5745 unsigned int pos = 0; 5746 unsigned int size; 5747 5748 /* 5749 * If a full page is expected, this can still be returned 5750 * if there's been a previous partial read and the 5751 * rest of the page can be read and the commit page is off 5752 * the reader page. 5753 */ 5754 if (full && 5755 (!read || (len < (commit - read)) || 5756 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5757 goto out_unlock; 5758 5759 if (len > (commit - read)) 5760 len = (commit - read); 5761 5762 /* Always keep the time extend and data together */ 5763 size = rb_event_ts_length(event); 5764 5765 if (len < size) 5766 goto out_unlock; 5767 5768 /* save the current timestamp, since the user will need it */ 5769 save_timestamp = cpu_buffer->read_stamp; 5770 5771 /* Need to copy one event at a time */ 5772 do { 5773 /* We need the size of one event, because 5774 * rb_advance_reader only advances by one event, 5775 * whereas rb_event_ts_length may include the size of 5776 * one or two events. 5777 * We have already ensured there's enough space if this 5778 * is a time extend. 
/*
 * We only allocate new buffers, never free them if the CPU goes down.
 * If we were to free the buffer, then the user would lose any trace that was in
 * the buffer.
 */
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct trace_buffer *buffer;
	long nr_pages_same;
	int cpu_i;
	unsigned long nr_pages;

	buffer = container_of(node, struct trace_buffer, node);
	if (cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	nr_pages = 0;
	nr_pages_same = 1;
	/* check if all cpu sizes are same */
	for_each_buffer_cpu(buffer, cpu_i) {
		/* fill in the size from first enabled cpu */
		if (nr_pages == 0)
			nr_pages = buffer->buffers[cpu_i]->nr_pages;
		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
			nr_pages_same = 0;
			break;
		}
	}
	/* allocate minimum pages, user can later expand it */
	if (!nr_pages_same)
		nr_pages = 2;
	buffer->buffers[cpu] =
		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu]) {
		WARN(1, "failed to allocate ring buffer on CPU %u\n",
		     cpu);
		return -ENOMEM;
	}
	/* Make sure the new buffer is visible before the cpumask bit is set */
	smp_wmb();
	cpumask_set_cpu(cpu, buffer->cpumask);
	return 0;
}

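/*
 * Illustrative note, not part of the original file: trace_rb_cpu_prepare()
 * is intended to be used as a multi-instance CPU hotplug "prepare"
 * callback. The exact call sites live outside this function, but the
 * registration is roughly of this shape (shown only as an assumption-laden
 * sketch, not as the authoritative setup code):
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *
 * with each ring buffer added as an instance via:
 *
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 */
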
#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct trace_buffer	*buffer;
	unsigned long		events;
	unsigned long		bytes_written;
	unsigned long		bytes_alloc;
	unsigned long		bytes_dropped;
	unsigned long		events_nested;
	unsigned long		bytes_written_nested;
	unsigned long		bytes_alloc_nested;
	unsigned long		bytes_dropped_nested;
	int			min_size_nested;
	int			max_size_nested;
	int			max_size;
	int			min_size;
	int			cpu;
	int			cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};
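
/*
 * Layout sketch (illustrative only, based on rb_write_something() below):
 * each test event payload is a struct rb_item followed by 'size' bytes
 * copied from rb_string, so a write of size 10 reserves
 * sizeof(struct rb_item) + 10 bytes and stores:
 *
 *	| int size = 10 | 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' |
 *
 * The reader side of the test recomputes this length from item->size to
 * cross-check the bytes reported by the writers.
 */
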
static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes different than what is written */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e, to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

out:
	ring_buffer_unlock_commit(data->buffer);

	return 0;
}

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {

		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non preempt, let others run */
		schedule();
	}

	return 0;
}

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct trace_buffer *buffer;
	int cpu;
	int ret = 0;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
		return 0;
	}

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
						     cpu, "rbtester/%u");
		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
			pr_cont("FAILED\n");
			ret = PTR_ERR(rb_threads[cpu]);
			goto out_free;
		}
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(IS_ERR(rb_hammer))) {
		pr_cont("FAILED\n");
		ret = PTR_ERR(rb_hammer);
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show buffer is enabled before setting rb_test_started.
	 * Yes there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	/* Report! */
	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info(" events: %ld\n", total_events);
		pr_info(" dropped bytes: %ld\n", total_dropped);
		pr_info(" alloced bytes: %ld\n", total_alloc);
		pr_info(" written bytes: %ld\n", total_written);
		pr_info(" biggest event: %d\n", big_event_size);
		pr_info(" smallest event: %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected:   %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info(" read events: %ld\n", total_read);
		pr_info(" lost events: %ld\n", total_lost);
		pr_info(" total events: %ld\n", total_lost + total_read);
		pr_info(" recorded len bytes: %ld\n", total_len);
		pr_info(" recorded size bytes: %ld\n", total_size);
		if (total_lost) {
			pr_info(" With dropped events, record len and size may not match\n"
				" alloced and written from above\n");
		} else {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */