// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
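
/*
 * For reference only: the header printed above mirrors the event layout
 * declared in <linux/ring_buffer.h>, roughly:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 0 means the payload length lives in array[0]; the values
 * 29-31 are the PADDING, TIME_EXTEND and TIME_STAMP types printed above.
 */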
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
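
/*
 * A rough worked example of the two encodings above (illustrative only):
 * an event with a 12 byte payload is stored with
 * type_len = 12 / RB_ALIGNMENT = 3, so rb_event_data_length() returns
 * 3 * 4 + RB_EVNT_HDR_SIZE = 16.  A payload too big for the 5 bit
 * type_len is stored with type_len = 0 and its length in array[0],
 * e.g. array[0] = 200 gives 200 + RB_EVNT_HDR_SIZE = 204.
 */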
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. The exception is a
 * TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)		\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
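
/*
 * Illustrative sketch only: a TIME_EXTEND/TIME_STAMP event stores the
 * upper bits of its timestamp in the 32 bit array[0] and the lower
 * TS_SHIFT (27) bits in time_delta, so rb_event_time_stamp()
 * reconstructs e.g. array[0] = 0x2, time_delta = 0x3 as
 * (0x2 << 27) + 0x3 = 0x10000003.
 */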
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens,
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
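
/*
 * For illustration only: a raw write value of 0x300015 decodes as a
 * write index of 0x300015 & RB_WRITE_MASK = 0x15 with an updater count
 * of 0x300015 >> 20 = 3.  rb_tail_page_update() below relies on this
 * split by adding RB_WRITE_INTCNT to bump only the updater count.
 */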
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	long				wait_index;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};
struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 bit still uses local64_t).
 *
 * For the ring buffer, the 64 bit operations required for the time
 * stamp are the following:
 *
 * - A read may fail if it interrupted a modification of the time stamp.
 *   It will succeed if it did not interrupt another write, even if
 *   the read itself is interrupted by a write.
 *   It returns whether it was successful or not.
 *
 * - Writes always succeed and will overwrite other writes and writes
 *   that were done by events interrupting the current write.
 *
 * - A write followed by a read of the same time stamp will always succeed,
 *   but may not contain the same value.
 *
 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *   Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 * (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half hold a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 * top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}
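
/*
 * Worked example (illustrative numbers only): a 60 bit stamp splits
 * into top = (val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK and
 * bottom = val & RB_TIME_VAL_MASK.  With an update counter of 2, each
 * half is stored as (half & RB_TIME_VAL_MASK) | (2 << RB_TIME_SHIFT).
 * A reader that then sees counter bits 2 on one half but 3 on the
 * other knows it raced with a writer and must fail the read.
 */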
static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	return local_try_cmpxchg(l, &expect, set);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	return local64_try_cmpxchg(&t->time, &expect, set);
}
#endif
passed in. 765 */ 766 //#define RB_VERIFY_EVENT 767 #ifdef RB_VERIFY_EVENT 768 static struct list_head *rb_list_head(struct list_head *list); 769 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, 770 void *event) 771 { 772 struct buffer_page *page = cpu_buffer->commit_page; 773 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); 774 struct list_head *next; 775 long commit, write; 776 unsigned long addr = (unsigned long)event; 777 bool done = false; 778 int stop = 0; 779 780 /* Make sure the event exists and is not committed yet */ 781 do { 782 if (page == tail_page || WARN_ON_ONCE(stop++ > 100)) 783 done = true; 784 commit = local_read(&page->page->commit); 785 write = local_read(&page->write); 786 if (addr >= (unsigned long)&page->page->data[commit] && 787 addr < (unsigned long)&page->page->data[write]) 788 return; 789 790 next = rb_list_head(page->list.next); 791 page = list_entry(next, struct buffer_page, list); 792 } while (!done); 793 WARN_ON_ONCE(1); 794 } 795 #else 796 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, 797 void *event) 798 { 799 } 800 #endif 801 802 /* 803 * The absolute time stamp drops the 5 MSBs and some clocks may 804 * require them. The rb_fix_abs_ts() will take a previous full 805 * time stamp, and add the 5 MSB of that time stamp on to the 806 * saved absolute time stamp. Then they are compared in case of 807 * the unlikely event that the latest time stamp incremented 808 * the 5 MSB. 809 */ 810 static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts) 811 { 812 if (save_ts & TS_MSB) { 813 abs |= save_ts & TS_MSB; 814 /* Check for overflow */ 815 if (unlikely(abs < save_ts)) 816 abs += 1ULL << 59; 817 } 818 return abs; 819 } 820 821 static inline u64 rb_time_stamp(struct trace_buffer *buffer); 822 823 /** 824 * ring_buffer_event_time_stamp - return the event's current time stamp 825 * @buffer: The buffer that the event is on 826 * @event: the event to get the time stamp of 827 * 828 * Note, this must be called after @event is reserved, and before it is 829 * committed to the ring buffer. And must be called from the same 830 * context where the event was reserved (normal, softirq, irq, etc). 831 * 832 * Returns the time stamp associated with the current event. 833 * If the event has an extended time stamp, then that is used as 834 * the time stamp to return. 835 * In the highly unlikely case that the event was nested more than 836 * the max nesting, then the write_stamp of the buffer is returned, 837 * otherwise current time is returned, but that really neither of 838 * the last two cases should ever happen. 
static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise the current time is returned, although really, neither of
 * the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}
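
/*
 * For illustration: @full is a percentage.  With nr_pages = 10 and
 * full = 50, full_hit() returns true once dirty * 100 > 50 * 10,
 * i.e. once more than 5 of the 10 pages have content.
 */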

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on this.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			bool done;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);

			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}
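
/*
 * A rough usage sketch (illustrative only, assuming the public reader
 * API from <linux/ring_buffer.h>; process() stands in for caller
 * logic): a consuming reader would typically block until data arrives
 * and then drain one CPU's buffer:
 *
 *	while (!ring_buffer_wait(buffer, cpu, 0)) {
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		while ((event = ring_buffer_consume(buffer, cpu, &ts,
 *						    NULL)))
 *			process(ring_buffer_event_data(event));
 *	}
 */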

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
		if (!cpu_buffer->shortest_full ||
		    cpu_buffer->shortest_full > full)
			cpu_buffer->shortest_full = full;
	} else {
		poll_wait(filp, &work->waiters, poll_table);
		work->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in the buffer.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we cannot trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
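
/*
 * For illustration only (hypothetical address): because buffer pages
 * are cache line aligned, a pointer such as 0xffff888012345680 has its
 * two low bits free, so "points to head" is encoded as
 * 0xffff888012345681 (RB_PAGE_HEAD) and "being updated" as
 * 0xffff888012345682 (RB_PAGE_UPDATE).  rb_list_head() masks the two
 * low bits back off before the pointer is dereferenced.
 */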

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static bool rb_head_page_replace(struct buffer_page *old,
				 struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			   struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return;
	}
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
		long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * If a user thread allocates too much and si_mem_available()
	 * reports there's enough memory even though there is not,
	 * make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	irq_work_sync(&cpu_buffer->irq_work.work);

	free_buffer_page(cpu_buffer->reader_page);

	if (head) {
		rb_head_page_deactivate(cpu_buffer);

		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
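
/*
 * A rough usage sketch (illustrative only): callers normally go
 * through the ring_buffer_alloc() wrapper in <linux/ring_buffer.h>,
 * which supplies the lock_class_key:
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */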

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	irq_work_sync(&buffer->irq_work.work);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static bool
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}
	/* Read iterators need to reset themselves when some pages removed */
	cpu_buffer->pages_removed += nr_removed;

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have the head_bit value preserved for the
	 * next page.
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure 'pages' points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						   struct buffer_page, list);

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
			       list);
	tmp_iter_page = first_page;

	do {
		cond_resched();

		to_remove_page = tmp_iter_page;
		rb_inc_page(&tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in the ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
			local_inc(&cpu_buffer->pages_lost);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

static bool
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	unsigned long flags;
	bool success;
	int retries;

	/* Can be called at early boot up, where interrupts must not be enabled */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move the head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of the list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from the head page to the
	 *    start of the new pages list.
	 * 3. Finally, we update the head->prev to the end of the new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
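	 *
	 * An illustrative sketch (not from the original source) of the
	 * splice in steps 1-3:
	 *
	 *    before:  prev_page --> head(H) --> ...
	 *    after:   prev_page --> first .. last --> head(H) --> ...
	 *
	 * The cmpxchg on prev_page->next in step 2 is what makes the
	 * splice safe against the writer moving the head page under us.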
	 */
	retries = 10;
	success = false;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;
		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);

		if (!hpage)
			break;
		head_page = &hpage->list;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update the head page's prev
			 * pointer to point to the end of the list
			 */
			head_page->prev = last_page;
			success = true;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding the new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}

static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	bool success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					  -cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 * @cpu_id: the cpu buffer to resize
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns 0 on success and < 0 on failure.
 */
int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
		       int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long nr_pages;
	int cpu, err;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return 0;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return 0;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/* we need a minimum of two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);
	atomic_inc(&buffer->resizing);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/*
		 * Don't succeed if resizing is disabled, as a reader might be
		 * manipulating the ring buffer and is expecting a sane state
		 * while this is true.
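		 * (A non-zero resize_disabled means such a reader is active;
		 * rather than wait for it, we bail out with -EBUSY below.)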
2178 */ 2179 for_each_buffer_cpu(buffer, cpu) { 2180 cpu_buffer = buffer->buffers[cpu]; 2181 if (atomic_read(&cpu_buffer->resize_disabled)) { 2182 err = -EBUSY; 2183 goto out_err_unlock; 2184 } 2185 } 2186 2187 /* calculate the pages to update */ 2188 for_each_buffer_cpu(buffer, cpu) { 2189 cpu_buffer = buffer->buffers[cpu]; 2190 2191 cpu_buffer->nr_pages_to_update = nr_pages - 2192 cpu_buffer->nr_pages; 2193 /* 2194 * nothing more to do for removing pages or no update 2195 */ 2196 if (cpu_buffer->nr_pages_to_update <= 0) 2197 continue; 2198 /* 2199 * to add pages, make sure all new pages can be 2200 * allocated without receiving ENOMEM 2201 */ 2202 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2203 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2204 &cpu_buffer->new_pages)) { 2205 /* not enough memory for new pages */ 2206 err = -ENOMEM; 2207 goto out_err; 2208 } 2209 2210 cond_resched(); 2211 } 2212 2213 cpus_read_lock(); 2214 /* 2215 * Fire off all the required work handlers 2216 * We can't schedule on offline CPUs, but it's not necessary 2217 * since we can change their buffer sizes without any race. 2218 */ 2219 for_each_buffer_cpu(buffer, cpu) { 2220 cpu_buffer = buffer->buffers[cpu]; 2221 if (!cpu_buffer->nr_pages_to_update) 2222 continue; 2223 2224 /* Can't run something on an offline CPU. */ 2225 if (!cpu_online(cpu)) { 2226 rb_update_pages(cpu_buffer); 2227 cpu_buffer->nr_pages_to_update = 0; 2228 } else { 2229 /* Run directly if possible. */ 2230 migrate_disable(); 2231 if (cpu != smp_processor_id()) { 2232 migrate_enable(); 2233 schedule_work_on(cpu, 2234 &cpu_buffer->update_pages_work); 2235 } else { 2236 update_pages_handler(&cpu_buffer->update_pages_work); 2237 migrate_enable(); 2238 } 2239 } 2240 } 2241 2242 /* wait for all the updates to complete */ 2243 for_each_buffer_cpu(buffer, cpu) { 2244 cpu_buffer = buffer->buffers[cpu]; 2245 if (!cpu_buffer->nr_pages_to_update) 2246 continue; 2247 2248 if (cpu_online(cpu)) 2249 wait_for_completion(&cpu_buffer->update_done); 2250 cpu_buffer->nr_pages_to_update = 0; 2251 } 2252 2253 cpus_read_unlock(); 2254 } else { 2255 cpu_buffer = buffer->buffers[cpu_id]; 2256 2257 if (nr_pages == cpu_buffer->nr_pages) 2258 goto out; 2259 2260 /* 2261 * Don't succeed if resizing is disabled, as a reader might be 2262 * manipulating the ring buffer and is expecting a sane state while 2263 * this is true. 2264 */ 2265 if (atomic_read(&cpu_buffer->resize_disabled)) { 2266 err = -EBUSY; 2267 goto out_err_unlock; 2268 } 2269 2270 cpu_buffer->nr_pages_to_update = nr_pages - 2271 cpu_buffer->nr_pages; 2272 2273 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2274 if (cpu_buffer->nr_pages_to_update > 0 && 2275 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2276 &cpu_buffer->new_pages)) { 2277 err = -ENOMEM; 2278 goto out_err; 2279 } 2280 2281 cpus_read_lock(); 2282 2283 /* Can't run something on an offline CPU. */ 2284 if (!cpu_online(cpu_id)) 2285 rb_update_pages(cpu_buffer); 2286 else { 2287 /* Run directly if possible. 
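		 * If we are already running on cpu_id, update the pages in
		 * place; otherwise schedule the work on that CPU and wait
		 * for it to complete.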
*/ 2288 migrate_disable(); 2289 if (cpu_id == smp_processor_id()) { 2290 rb_update_pages(cpu_buffer); 2291 migrate_enable(); 2292 } else { 2293 migrate_enable(); 2294 schedule_work_on(cpu_id, 2295 &cpu_buffer->update_pages_work); 2296 wait_for_completion(&cpu_buffer->update_done); 2297 } 2298 } 2299 2300 cpu_buffer->nr_pages_to_update = 0; 2301 cpus_read_unlock(); 2302 } 2303 2304 out: 2305 /* 2306 * The ring buffer resize can happen with the ring buffer 2307 * enabled, so that the update disturbs the tracing as little 2308 * as possible. But if the buffer is disabled, we do not need 2309 * to worry about that, and we can take the time to verify 2310 * that the buffer is not corrupt. 2311 */ 2312 if (atomic_read(&buffer->record_disabled)) { 2313 atomic_inc(&buffer->record_disabled); 2314 /* 2315 * Even though the buffer was disabled, we must make sure 2316 * that it is truly disabled before calling rb_check_pages. 2317 * There could have been a race between checking 2318 * record_disable and incrementing it. 2319 */ 2320 synchronize_rcu(); 2321 for_each_buffer_cpu(buffer, cpu) { 2322 cpu_buffer = buffer->buffers[cpu]; 2323 rb_check_pages(cpu_buffer); 2324 } 2325 atomic_dec(&buffer->record_disabled); 2326 } 2327 2328 atomic_dec(&buffer->resizing); 2329 mutex_unlock(&buffer->mutex); 2330 return 0; 2331 2332 out_err: 2333 for_each_buffer_cpu(buffer, cpu) { 2334 struct buffer_page *bpage, *tmp; 2335 2336 cpu_buffer = buffer->buffers[cpu]; 2337 cpu_buffer->nr_pages_to_update = 0; 2338 2339 if (list_empty(&cpu_buffer->new_pages)) 2340 continue; 2341 2342 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2343 list) { 2344 list_del_init(&bpage->list); 2345 free_buffer_page(bpage); 2346 } 2347 } 2348 out_err_unlock: 2349 atomic_dec(&buffer->resizing); 2350 mutex_unlock(&buffer->mutex); 2351 return err; 2352 } 2353 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2354 2355 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2356 { 2357 mutex_lock(&buffer->mutex); 2358 if (val) 2359 buffer->flags |= RB_FL_OVERWRITE; 2360 else 2361 buffer->flags &= ~RB_FL_OVERWRITE; 2362 mutex_unlock(&buffer->mutex); 2363 } 2364 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2365 2366 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2367 { 2368 return bpage->page->data + index; 2369 } 2370 2371 static __always_inline struct ring_buffer_event * 2372 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2373 { 2374 return __rb_page_index(cpu_buffer->reader_page, 2375 cpu_buffer->reader_page->read); 2376 } 2377 2378 static struct ring_buffer_event * 2379 rb_iter_head_event(struct ring_buffer_iter *iter) 2380 { 2381 struct ring_buffer_event *event; 2382 struct buffer_page *iter_head_page = iter->head_page; 2383 unsigned long commit; 2384 unsigned length; 2385 2386 if (iter->head != iter->next_event) 2387 return iter->event; 2388 2389 /* 2390 * When the writer goes across pages, it issues a cmpxchg which 2391 * is a mb(), which will synchronize with the rmb here. 2392 * (see rb_tail_page_update() and __rb_reserve_next()) 2393 */ 2394 commit = rb_page_commit(iter_head_page); 2395 smp_rmb(); 2396 2397 /* An event needs to be at least 8 bytes in size */ 2398 if (iter->head > commit - 8) 2399 goto reset; 2400 2401 event = __rb_page_index(iter_head_page, iter->head); 2402 length = rb_event_length(event); 2403 2404 /* 2405 * READ_ONCE() doesn't work on functions and we don't want the 2406 * compiler doing any crazy optimizations with length. 
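	 * A plain compiler barrier() is sufficient here: the goal is only
	 * to keep the compiler from re-reading or caching 'length' across
	 * the checks below, not to order accesses against other CPUs.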
2407 */ 2408 barrier(); 2409 2410 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2411 /* Writer corrupted the read? */ 2412 goto reset; 2413 2414 memcpy(iter->event, event, length); 2415 /* 2416 * If the page stamp is still the same after this rmb() then the 2417 * event was safely copied without the writer entering the page. 2418 */ 2419 smp_rmb(); 2420 2421 /* Make sure the page didn't change since we read this */ 2422 if (iter->page_stamp != iter_head_page->page->time_stamp || 2423 commit > rb_page_commit(iter_head_page)) 2424 goto reset; 2425 2426 iter->next_event = iter->head + length; 2427 return iter->event; 2428 reset: 2429 /* Reset to the beginning */ 2430 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2431 iter->head = 0; 2432 iter->next_event = 0; 2433 iter->missed_events = 1; 2434 return NULL; 2435 } 2436 2437 /* Size is determined by what has been committed */ 2438 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2439 { 2440 return rb_page_commit(bpage); 2441 } 2442 2443 static __always_inline unsigned 2444 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2445 { 2446 return rb_page_commit(cpu_buffer->commit_page); 2447 } 2448 2449 static __always_inline unsigned 2450 rb_event_index(struct ring_buffer_event *event) 2451 { 2452 unsigned long addr = (unsigned long)event; 2453 2454 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2455 } 2456 2457 static void rb_inc_iter(struct ring_buffer_iter *iter) 2458 { 2459 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2460 2461 /* 2462 * The iterator could be on the reader page (it starts there). 2463 * But the head could have moved, since the reader was 2464 * found. Check for this case and assign the iterator 2465 * to the head page instead of next. 2466 */ 2467 if (iter->head_page == cpu_buffer->reader_page) 2468 iter->head_page = rb_set_head_page(cpu_buffer); 2469 else 2470 rb_inc_page(&iter->head_page); 2471 2472 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2473 iter->head = 0; 2474 iter->next_event = 0; 2475 } 2476 2477 /* 2478 * rb_handle_head_page - writer hit the head page 2479 * 2480 * Returns: +1 to retry page 2481 * 0 to continue 2482 * -1 on error 2483 */ 2484 static int 2485 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2486 struct buffer_page *tail_page, 2487 struct buffer_page *next_page) 2488 { 2489 struct buffer_page *new_head; 2490 int entries; 2491 int type; 2492 int ret; 2493 2494 entries = rb_page_entries(next_page); 2495 2496 /* 2497 * The hard part is here. We need to move the head 2498 * forward, and protect against both readers on 2499 * other CPUs and writers coming in via interrupts. 2500 */ 2501 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2502 RB_PAGE_HEAD); 2503 2504 /* 2505 * type can be one of four: 2506 * NORMAL - an interrupt already moved it for us 2507 * HEAD - we are the first to get here. 2508 * UPDATE - we are the interrupt interrupting 2509 * a current move. 2510 * MOVED - a reader on another CPU moved the next 2511 * pointer to its reader page. Give up 2512 * and try again. 2513 */ 2514 2515 switch (type) { 2516 case RB_PAGE_HEAD: 2517 /* 2518 * We changed the head to UPDATE, thus 2519 * it is our responsibility to update 2520 * the counters. 
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
		local_inc(&cpu_buffer->pages_lost);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * Otherwise we are an interrupt, and only
	 * want the outermost commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(&new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *           1) We really set it.
	 *           2) A bunch of interrupts came in and moved
	 *              the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		struct buffer_page *buffer_tail_page;

		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (buffer_tail_page != tail_page &&
		    buffer_tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outermost commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
2626 */ 2627 if (type == RB_PAGE_HEAD) { 2628 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2629 tail_page, 2630 RB_PAGE_UPDATE); 2631 if (RB_WARN_ON(cpu_buffer, 2632 ret != RB_PAGE_UPDATE)) 2633 return -1; 2634 } 2635 2636 return 0; 2637 } 2638 2639 static inline void 2640 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2641 unsigned long tail, struct rb_event_info *info) 2642 { 2643 struct buffer_page *tail_page = info->tail_page; 2644 struct ring_buffer_event *event; 2645 unsigned long length = info->length; 2646 2647 /* 2648 * Only the event that crossed the page boundary 2649 * must fill the old tail_page with padding. 2650 */ 2651 if (tail >= BUF_PAGE_SIZE) { 2652 /* 2653 * If the page was filled, then we still need 2654 * to update the real_end. Reset it to zero 2655 * and the reader will ignore it. 2656 */ 2657 if (tail == BUF_PAGE_SIZE) 2658 tail_page->real_end = 0; 2659 2660 local_sub(length, &tail_page->write); 2661 return; 2662 } 2663 2664 event = __rb_page_index(tail_page, tail); 2665 2666 /* 2667 * Save the original length to the meta data. 2668 * This will be used by the reader to add lost event 2669 * counter. 2670 */ 2671 tail_page->real_end = tail; 2672 2673 /* 2674 * If this event is bigger than the minimum size, then 2675 * we need to be careful that we don't subtract the 2676 * write counter enough to allow another writer to slip 2677 * in on this page. 2678 * We put in a discarded commit instead, to make sure 2679 * that this space is not used again, and this space will 2680 * not be accounted into 'entries_bytes'. 2681 * 2682 * If we are less than the minimum size, we don't need to 2683 * worry about it. 2684 */ 2685 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2686 /* No room for any events */ 2687 2688 /* Mark the rest of the page with padding */ 2689 rb_event_set_padding(event); 2690 2691 /* Make sure the padding is visible before the write update */ 2692 smp_wmb(); 2693 2694 /* Set the write back to the previous setting */ 2695 local_sub(length, &tail_page->write); 2696 return; 2697 } 2698 2699 /* Put in a discarded event */ 2700 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2701 event->type_len = RINGBUF_TYPE_PADDING; 2702 /* time delta must be non zero */ 2703 event->time_delta = 1; 2704 2705 /* account for padding bytes */ 2706 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2707 2708 /* Make sure the padding is visible before the tail_page->write update */ 2709 smp_wmb(); 2710 2711 /* Set write to end of buffer */ 2712 length = (tail + length) - BUF_PAGE_SIZE; 2713 local_sub(length, &tail_page->write); 2714 } 2715 2716 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2717 2718 /* 2719 * This is the slow path, force gcc not to inline it. 2720 */ 2721 static noinline struct ring_buffer_event * 2722 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2723 unsigned long tail, struct rb_event_info *info) 2724 { 2725 struct buffer_page *tail_page = info->tail_page; 2726 struct buffer_page *commit_page = cpu_buffer->commit_page; 2727 struct trace_buffer *buffer = cpu_buffer->buffer; 2728 struct buffer_page *next_page; 2729 int ret; 2730 2731 next_page = tail_page; 2732 2733 rb_inc_page(&next_page); 2734 2735 /* 2736 * If for some reason, we had an interrupt storm that made 2737 * it all the way around the buffer, bail, and warn 2738 * about it. 
2739 */ 2740 if (unlikely(next_page == commit_page)) { 2741 local_inc(&cpu_buffer->commit_overrun); 2742 goto out_reset; 2743 } 2744 2745 /* 2746 * This is where the fun begins! 2747 * 2748 * We are fighting against races between a reader that 2749 * could be on another CPU trying to swap its reader 2750 * page with the buffer head. 2751 * 2752 * We are also fighting against interrupts coming in and 2753 * moving the head or tail on us as well. 2754 * 2755 * If the next page is the head page then we have filled 2756 * the buffer, unless the commit page is still on the 2757 * reader page. 2758 */ 2759 if (rb_is_head_page(next_page, &tail_page->list)) { 2760 2761 /* 2762 * If the commit is not on the reader page, then 2763 * move the header page. 2764 */ 2765 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2766 /* 2767 * If we are not in overwrite mode, 2768 * this is easy, just stop here. 2769 */ 2770 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2771 local_inc(&cpu_buffer->dropped_events); 2772 goto out_reset; 2773 } 2774 2775 ret = rb_handle_head_page(cpu_buffer, 2776 tail_page, 2777 next_page); 2778 if (ret < 0) 2779 goto out_reset; 2780 if (ret) 2781 goto out_again; 2782 } else { 2783 /* 2784 * We need to be careful here too. The 2785 * commit page could still be on the reader 2786 * page. We could have a small buffer, and 2787 * have filled up the buffer with events 2788 * from interrupts and such, and wrapped. 2789 * 2790 * Note, if the tail page is also on the 2791 * reader_page, we let it move out. 2792 */ 2793 if (unlikely((cpu_buffer->commit_page != 2794 cpu_buffer->tail_page) && 2795 (cpu_buffer->commit_page == 2796 cpu_buffer->reader_page))) { 2797 local_inc(&cpu_buffer->commit_overrun); 2798 goto out_reset; 2799 } 2800 } 2801 } 2802 2803 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2804 2805 out_again: 2806 2807 rb_reset_tail(cpu_buffer, tail, info); 2808 2809 /* Commit what we have for now. */ 2810 rb_end_commit(cpu_buffer); 2811 /* rb_end_commit() decs committing */ 2812 local_inc(&cpu_buffer->committing); 2813 2814 /* fail and let the caller try again */ 2815 return ERR_PTR(-EAGAIN); 2816 2817 out_reset: 2818 /* reset write */ 2819 rb_reset_tail(cpu_buffer, tail, info); 2820 2821 return NULL; 2822 } 2823 2824 /* Slow path */ 2825 static struct ring_buffer_event * 2826 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2827 { 2828 if (abs) 2829 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2830 else 2831 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2832 2833 /* Not the first event on the page, or not delta? */ 2834 if (abs || rb_event_index(event)) { 2835 event->time_delta = delta & TS_MASK; 2836 event->array[0] = delta >> TS_SHIFT; 2837 } else { 2838 /* nope, just zero it */ 2839 event->time_delta = 0; 2840 event->array[0] = 0; 2841 } 2842 2843 return skip_time_extend(event); 2844 } 2845 2846 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2847 static inline bool sched_clock_stable(void) 2848 { 2849 return true; 2850 } 2851 #endif 2852 2853 static void 2854 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2855 struct rb_event_info *info) 2856 { 2857 u64 write_stamp; 2858 2859 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2860 (unsigned long long)info->delta, 2861 (unsigned long long)info->ts, 2862 (unsigned long long)info->before, 2863 (unsigned long long)info->after, 2864 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2865 sched_clock_stable() ? 
"" : 2866 "If you just came from a suspend/resume,\n" 2867 "please switch to the trace global clock:\n" 2868 " echo global > /sys/kernel/tracing/trace_clock\n" 2869 "or add trace_clock=global to the kernel command line\n"); 2870 } 2871 2872 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2873 struct ring_buffer_event **event, 2874 struct rb_event_info *info, 2875 u64 *delta, 2876 unsigned int *length) 2877 { 2878 bool abs = info->add_timestamp & 2879 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2880 2881 if (unlikely(info->delta > (1ULL << 59))) { 2882 /* 2883 * Some timers can use more than 59 bits, and when a timestamp 2884 * is added to the buffer, it will lose those bits. 2885 */ 2886 if (abs && (info->ts & TS_MSB)) { 2887 info->delta &= ABS_TS_MASK; 2888 2889 /* did the clock go backwards */ 2890 } else if (info->before == info->after && info->before > info->ts) { 2891 /* not interrupted */ 2892 static int once; 2893 2894 /* 2895 * This is possible with a recalibrating of the TSC. 2896 * Do not produce a call stack, but just report it. 2897 */ 2898 if (!once) { 2899 once++; 2900 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2901 info->before, info->ts); 2902 } 2903 } else 2904 rb_check_timestamp(cpu_buffer, info); 2905 if (!abs) 2906 info->delta = 0; 2907 } 2908 *event = rb_add_time_stamp(*event, info->delta, abs); 2909 *length -= RB_LEN_TIME_EXTEND; 2910 *delta = 0; 2911 } 2912 2913 /** 2914 * rb_update_event - update event type and data 2915 * @cpu_buffer: The per cpu buffer of the @event 2916 * @event: the event to update 2917 * @info: The info to update the @event with (contains length and delta) 2918 * 2919 * Update the type and data fields of the @event. The length 2920 * is the actual size that is written to the ring buffer, 2921 * and with this, we can determine what to place into the 2922 * data field. 2923 */ 2924 static void 2925 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2926 struct ring_buffer_event *event, 2927 struct rb_event_info *info) 2928 { 2929 unsigned length = info->length; 2930 u64 delta = info->delta; 2931 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2932 2933 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2934 cpu_buffer->event_stamp[nest] = info->ts; 2935 2936 /* 2937 * If we need to add a timestamp, then we 2938 * add it to the start of the reserved space. 2939 */ 2940 if (unlikely(info->add_timestamp)) 2941 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2942 2943 event->time_delta = delta; 2944 length -= RB_EVNT_HDR_SIZE; 2945 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2946 event->type_len = 0; 2947 event->array[0] = length; 2948 } else 2949 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2950 } 2951 2952 static unsigned rb_calculate_event_length(unsigned length) 2953 { 2954 struct ring_buffer_event event; /* Used only for sizeof array */ 2955 2956 /* zero length can cause confusions */ 2957 if (!length) 2958 length++; 2959 2960 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2961 length += sizeof(event.array[0]); 2962 2963 length += RB_EVNT_HDR_SIZE; 2964 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2965 2966 /* 2967 * In case the time delta is larger than the 27 bits for it 2968 * in the header, we need to add a timestamp. If another 2969 * event comes in when trying to discard this one to increase 2970 * the length, then the timestamp will be added in the allocated 2971 * space of this event. 
If length is bigger than the size needed
	 * for the TIME_EXTEND, then padding has to be used. The event's
	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
	 * As length is a multiple of 4, we only need to worry if it
	 * is 12 (RB_LEN_TIME_EXTEND + 4).
	 */
	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
		length += RB_ALIGNMENT;

	return length;
}

static u64 rb_time_delta(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return 0;

	case RINGBUF_TYPE_TIME_EXTEND:
		return rb_event_time_stamp(event);

	case RINGBUF_TYPE_TIME_STAMP:
		return 0;

	case RINGBUF_TYPE_DATA:
		return event->time_delta;
	default:
		return 0;
	}
}

static inline bool
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long addr;
	u64 write_stamp;
	u64 delta;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = READ_ONCE(cpu_buffer->tail_page);

	delta = rb_time_delta(event);

	if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
		return false;

	/* Make sure the write stamp is read before testing the location */
	barrier();

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);

		/* Something came in, can't discard */
		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
				     write_stamp, write_stamp - delta))
			return false;

		/*
		 * It's possible that the event time delta is zero
		 * (has the same time stamp as the previous event)
		 * in which case write_stamp and before_stamp could
		 * be the same. In such a case, force before_stamp
		 * to be different than write_stamp. It doesn't
		 * matter what it is, as long as it's different.
		 */
		if (!delta)
			rb_time_set(&cpu_buffer->before_stamp, 0);

		/*
		 * If an event were to come in now, it would see that the
		 * write_stamp and the before_stamp are different, and assume
		 * that this event just added itself before updating
		 * the write stamp. The interrupting event will fix the
		 * write stamp for us, and use the before stamp as its delta.
		 */

		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
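		 * The bits of 'write' above RB_WRITE_MASK were captured in
		 * write_mask; folding them into old_index and new_index means
		 * the cmpxchg below only succeeds if those upper bits are
		 * also unchanged, i.e. the page was not moved underneath us.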
3062 */ 3063 old_index += write_mask; 3064 new_index += write_mask; 3065 3066 /* caution: old_index gets updated on cmpxchg failure */ 3067 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { 3068 /* update counters */ 3069 local_sub(event_length, &cpu_buffer->entries_bytes); 3070 return true; 3071 } 3072 } 3073 3074 /* could not discard */ 3075 return false; 3076 } 3077 3078 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3079 { 3080 local_inc(&cpu_buffer->committing); 3081 local_inc(&cpu_buffer->commits); 3082 } 3083 3084 static __always_inline void 3085 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3086 { 3087 unsigned long max_count; 3088 3089 /* 3090 * We only race with interrupts and NMIs on this CPU. 3091 * If we own the commit event, then we can commit 3092 * all others that interrupted us, since the interruptions 3093 * are in stack format (they finish before they come 3094 * back to us). This allows us to do a simple loop to 3095 * assign the commit to the tail. 3096 */ 3097 again: 3098 max_count = cpu_buffer->nr_pages * 100; 3099 3100 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3101 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3102 return; 3103 if (RB_WARN_ON(cpu_buffer, 3104 rb_is_reader_page(cpu_buffer->tail_page))) 3105 return; 3106 /* 3107 * No need for a memory barrier here, as the update 3108 * of the tail_page did it for this page. 3109 */ 3110 local_set(&cpu_buffer->commit_page->page->commit, 3111 rb_page_write(cpu_buffer->commit_page)); 3112 rb_inc_page(&cpu_buffer->commit_page); 3113 /* add barrier to keep gcc from optimizing too much */ 3114 barrier(); 3115 } 3116 while (rb_commit_index(cpu_buffer) != 3117 rb_page_write(cpu_buffer->commit_page)) { 3118 3119 /* Make sure the readers see the content of what is committed. */ 3120 smp_wmb(); 3121 local_set(&cpu_buffer->commit_page->page->commit, 3122 rb_page_write(cpu_buffer->commit_page)); 3123 RB_WARN_ON(cpu_buffer, 3124 local_read(&cpu_buffer->commit_page->page->commit) & 3125 ~RB_WRITE_MASK); 3126 barrier(); 3127 } 3128 3129 /* again, keep gcc from optimizing */ 3130 barrier(); 3131 3132 /* 3133 * If an interrupt came in just after the first while loop 3134 * and pushed the tail page forward, we will be left with 3135 * a dangling commit that will never go forward. 3136 */ 3137 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3138 goto again; 3139 } 3140 3141 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3142 { 3143 unsigned long commits; 3144 3145 if (RB_WARN_ON(cpu_buffer, 3146 !local_read(&cpu_buffer->committing))) 3147 return; 3148 3149 again: 3150 commits = local_read(&cpu_buffer->commits); 3151 /* synchronize with interrupts */ 3152 barrier(); 3153 if (local_read(&cpu_buffer->committing) == 1) 3154 rb_set_commit_to_write(cpu_buffer); 3155 3156 local_dec(&cpu_buffer->committing); 3157 3158 /* synchronize with interrupts */ 3159 barrier(); 3160 3161 /* 3162 * Need to account for interrupts coming in between the 3163 * updating of the commit page and the clearing of the 3164 * committing counter. 
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->entries);
	rb_end_commit(cpu_buffer);
}

static __always_inline void
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
	if (buffer->irq_work.waiters_pending) {
		buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&buffer->irq_work.work);
	}

	if (cpu_buffer->irq_work.waiters_pending) {
		cpu_buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&cpu_buffer->irq_work.work);
	}

	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
		return;

	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
		return;

	if (!cpu_buffer->irq_work.full_waiters_pending)
		return;

	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);

	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
		return;

	cpu_buffer->irq_work.wakeup_full = true;
	cpu_buffer->irq_work.full_waiters_pending = false;
	/* irq_work_queue() supplies its own memory barriers */
	irq_work_queue(&cpu_buffer->irq_work.work);
}

#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
# define do_ring_buffer_record_recursion()	\
	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
#else
# define do_ring_buffer_record_recursion() do { } while (0)
#endif

/*
 * The lock and unlock are done within a preempt disable section.
 * The current_context per_cpu variable can only be modified
 * by the current task between lock and unlock. But it can
 * be modified more than once via an interrupt. To pass this
 * information from the lock to the unlock without having to
 * access the 'in_interrupt()' functions again (which do show
 * a bit of overhead in something as critical as function tracing),
 * we use a bitmask trick.
 *
 *  bit 1 = NMI context
 *  bit 2 = IRQ context
 *  bit 3 = SoftIRQ context
 *  bit 4 = normal context.
 *
 * This works because this is the order of contexts that can
 * preempt other contexts. A SoftIRQ never preempts an IRQ
 * context.
 *
 * When the context is determined, the corresponding bit is
 * checked and set (if it was set, then a recursion of that context
 * happened).
 *
 * On unlock, we need to clear this bit. To do so, just subtract
 * 1 from the current_context and AND it to itself.
3259 * 3260 * (binary) 3261 * 101 - 1 = 100 3262 * 101 & 100 = 100 (clearing bit zero) 3263 * 3264 * 1010 - 1 = 1001 3265 * 1010 & 1001 = 1000 (clearing bit 1) 3266 * 3267 * The least significant bit can be cleared this way, and it 3268 * just so happens that it is the same bit corresponding to 3269 * the current context. 3270 * 3271 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3272 * is set when a recursion is detected at the current context, and if 3273 * the TRANSITION bit is already set, it will fail the recursion. 3274 * This is needed because there's a lag between the changing of 3275 * interrupt context and updating the preempt count. In this case, 3276 * a false positive will be found. To handle this, one extra recursion 3277 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3278 * bit is already set, then it is considered a recursion and the function 3279 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3280 * 3281 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3282 * to be cleared. Even if it wasn't the context that set it. That is, 3283 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3284 * is called before preempt_count() is updated, since the check will 3285 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3286 * NMI then comes in, it will set the NMI bit, but when the NMI code 3287 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3288 * and leave the NMI bit set. But this is fine, because the interrupt 3289 * code that set the TRANSITION bit will then clear the NMI bit when it 3290 * calls trace_recursive_unlock(). If another NMI comes in, it will 3291 * set the TRANSITION bit and continue. 3292 * 3293 * Note: The TRANSITION bit only handles a single transition between context. 3294 */ 3295 3296 static __always_inline bool 3297 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3298 { 3299 unsigned int val = cpu_buffer->current_context; 3300 int bit = interrupt_context_level(); 3301 3302 bit = RB_CTX_NORMAL - bit; 3303 3304 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3305 /* 3306 * It is possible that this was called by transitioning 3307 * between interrupt context, and preempt_count() has not 3308 * been updated yet. In this case, use the TRANSITION bit. 3309 */ 3310 bit = RB_CTX_TRANSITION; 3311 if (val & (1 << (bit + cpu_buffer->nest))) { 3312 do_ring_buffer_record_recursion(); 3313 return true; 3314 } 3315 } 3316 3317 val |= (1 << (bit + cpu_buffer->nest)); 3318 cpu_buffer->current_context = val; 3319 3320 return false; 3321 } 3322 3323 static __always_inline void 3324 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3325 { 3326 cpu_buffer->current_context &= 3327 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3328 } 3329 3330 /* The recursive locking above uses 5 bits */ 3331 #define NESTED_BITS 5 3332 3333 /** 3334 * ring_buffer_nest_start - Allow to trace while nested 3335 * @buffer: The ring buffer to modify 3336 * 3337 * The ring buffer has a safety mechanism to prevent recursion. 3338 * But there may be a case where a trace needs to be done while 3339 * tracing something else. In this case, calling this function 3340 * will allow this function to nest within a currently active 3341 * ring_buffer_lock_reserve(). 
3342 * 3343 * Call this function before calling another ring_buffer_lock_reserve() and 3344 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 3345 */ 3346 void ring_buffer_nest_start(struct trace_buffer *buffer) 3347 { 3348 struct ring_buffer_per_cpu *cpu_buffer; 3349 int cpu; 3350 3351 /* Enabled by ring_buffer_nest_end() */ 3352 preempt_disable_notrace(); 3353 cpu = raw_smp_processor_id(); 3354 cpu_buffer = buffer->buffers[cpu]; 3355 /* This is the shift value for the above recursive locking */ 3356 cpu_buffer->nest += NESTED_BITS; 3357 } 3358 3359 /** 3360 * ring_buffer_nest_end - Allow to trace while nested 3361 * @buffer: The ring buffer to modify 3362 * 3363 * Must be called after ring_buffer_nest_start() and after the 3364 * ring_buffer_unlock_commit(). 3365 */ 3366 void ring_buffer_nest_end(struct trace_buffer *buffer) 3367 { 3368 struct ring_buffer_per_cpu *cpu_buffer; 3369 int cpu; 3370 3371 /* disabled by ring_buffer_nest_start() */ 3372 cpu = raw_smp_processor_id(); 3373 cpu_buffer = buffer->buffers[cpu]; 3374 /* This is the shift value for the above recursive locking */ 3375 cpu_buffer->nest -= NESTED_BITS; 3376 preempt_enable_notrace(); 3377 } 3378 3379 /** 3380 * ring_buffer_unlock_commit - commit a reserved 3381 * @buffer: The buffer to commit to 3382 * 3383 * This commits the data to the ring buffer, and releases any locks held. 3384 * 3385 * Must be paired with ring_buffer_lock_reserve. 3386 */ 3387 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3388 { 3389 struct ring_buffer_per_cpu *cpu_buffer; 3390 int cpu = raw_smp_processor_id(); 3391 3392 cpu_buffer = buffer->buffers[cpu]; 3393 3394 rb_commit(cpu_buffer); 3395 3396 rb_wakeups(buffer, cpu_buffer); 3397 3398 trace_recursive_unlock(cpu_buffer); 3399 3400 preempt_enable_notrace(); 3401 3402 return 0; 3403 } 3404 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3405 3406 /* Special value to validate all deltas on a page. */ 3407 #define CHECK_FULL_PAGE 1L 3408 3409 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3410 static void dump_buffer_page(struct buffer_data_page *bpage, 3411 struct rb_event_info *info, 3412 unsigned long tail) 3413 { 3414 struct ring_buffer_event *event; 3415 u64 ts, delta; 3416 int e; 3417 3418 ts = bpage->time_stamp; 3419 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3420 3421 for (e = 0; e < tail; e += rb_event_length(event)) { 3422 3423 event = (struct ring_buffer_event *)(bpage->data + e); 3424 3425 switch (event->type_len) { 3426 3427 case RINGBUF_TYPE_TIME_EXTEND: 3428 delta = rb_event_time_stamp(event); 3429 ts += delta; 3430 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3431 break; 3432 3433 case RINGBUF_TYPE_TIME_STAMP: 3434 delta = rb_event_time_stamp(event); 3435 ts = rb_fix_abs_ts(delta, ts); 3436 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3437 break; 3438 3439 case RINGBUF_TYPE_PADDING: 3440 ts += event->time_delta; 3441 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3442 break; 3443 3444 case RINGBUF_TYPE_DATA: 3445 ts += event->time_delta; 3446 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3447 break; 3448 3449 default: 3450 break; 3451 } 3452 } 3453 } 3454 3455 static DEFINE_PER_CPU(atomic_t, checking); 3456 static atomic_t ts_dump; 3457 3458 /* 3459 * Check if the current event time stamp matches the deltas on 3460 * the buffer page. 
 */
static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
			 struct rb_event_info *info,
			 unsigned long tail)
{
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	u64 ts, delta;
	bool full = false;
	int e;

	bpage = info->tail_page->page;

	if (tail == CHECK_FULL_PAGE) {
		full = true;
		tail = local_read(&bpage->commit);
	} else if (info->add_timestamp &
		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
		/* Ignore events with absolute time stamps */
		return;
	}

	/*
	 * Do not check the first event (skip possible extends too).
	 * Also do not check if previous events have not been committed.
	 */
	if (tail <= 8 || tail > local_read(&bpage->commit))
		return;

	/*
	 * If this interrupted another event's check, skip this one
	 * to avoid nested checking.
	 */
	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
		goto out;

	ts = bpage->time_stamp;

	for (e = 0; e < tail; e += rb_event_length(event)) {

		event = (struct ring_buffer_event *)(bpage->data + e);

		switch (event->type_len) {

		case RINGBUF_TYPE_TIME_EXTEND:
			delta = rb_event_time_stamp(event);
			ts += delta;
			break;

		case RINGBUF_TYPE_TIME_STAMP:
			delta = rb_event_time_stamp(event);
			ts = rb_fix_abs_ts(delta, ts);
			break;

		case RINGBUF_TYPE_PADDING:
			if (event->time_delta == 1)
				break;
			fallthrough;
		case RINGBUF_TYPE_DATA:
			ts += event->time_delta;
			break;

		default:
			RB_WARN_ON(cpu_buffer, 1);
		}
	}
	if ((full && ts > info->ts) ||
	    (!full && ts + info->delta != info->ts)) {
		/* If another report is happening, ignore this one */
		if (atomic_inc_return(&ts_dump) != 1) {
			atomic_dec(&ts_dump);
			goto out;
		}
		atomic_inc(&cpu_buffer->record_disabled);
		/* There are some cases during boot up where this can happen */
		WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
		pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
			cpu_buffer->cpu,
			ts + info->delta, info->ts, info->delta,
			info->before, info->after,
			full ? " (full)" : "");
		dump_buffer_page(bpage, info, tail);
		atomic_dec(&ts_dump);
		/* Do not re-enable checking */
		return;
	}
 out:
	atomic_dec(this_cpu_ptr(&checking));
}
#else
static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
				struct rb_event_info *info,
				unsigned long tail)
{
}
#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  struct rb_event_info *info)
{
	struct ring_buffer_event *event;
	struct buffer_page *tail_page;
	unsigned long tail, write, w;
	bool a_ok;
	bool b_ok;

	/* Don't let the compiler play games with cpu_buffer->tail_page */
	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);

 /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
	barrier();
	b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
	a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
	barrier();
	info->ts = rb_time_stamp(cpu_buffer->buffer);

	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
		info->delta = info->ts;
	} else {
		/*
		 * If interrupting an event time update, we may need an
		 * absolute timestamp.
		 * Don't bother if this is the start of a new page (w == 0).
		 */
		if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
			info->length += RB_LEN_TIME_EXTEND;
		} else {
			info->delta = info->ts - info->after;
			if (unlikely(test_time_stamp(info->delta))) {
				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
				info->length += RB_LEN_TIME_EXTEND;
			}
		}
	}

 /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);

 /*C*/	write = local_add_return(info->length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;

	tail = write - info->length;

	/* See if we shot past the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE)) {
		/* before and after may now be different, fix it up */
		b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
		if (a_ok && b_ok && info->before != info->after)
			(void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
					      info->before, info->after);
		if (a_ok && b_ok)
			check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
		return rb_move_tail(cpu_buffer, tail, info);
	}

	if (likely(tail == w)) {
		u64 save_before;
		bool s_ok;

		/* Nothing interrupted us between A and C */
 /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
		barrier();
 /*E*/		s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
		RB_WARN_ON(cpu_buffer, !s_ok);
		if (likely(!(info->add_timestamp &
			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
			/* This did not interrupt any time update */
			info->delta = info->ts - info->after;
		else
			/* Just use full timestamp for interrupting event */
			info->delta = info->ts;
		barrier();
		check_buffer(cpu_buffer, info, tail);
		if (unlikely(info->ts != save_before)) {
			/* SLOW PATH - Interrupted between C and E */

			a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
			RB_WARN_ON(cpu_buffer, !a_ok);

			/* Write stamp must only go forward */
			if (save_before > info->after) {
				/*
				 * We do not care about the result, only that
				 * it gets updated atomically.
				 */
				(void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
						      info->after, save_before);
			}
		}
	} else {
		u64 ts;
		/* SLOW PATH - Interrupted between A and C */
		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
		/* Was interrupted before here, write_stamp must be valid */
		RB_WARN_ON(cpu_buffer, !a_ok);
		ts = rb_time_stamp(cpu_buffer->buffer);
		barrier();
 /*E*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
		    info->after < ts &&
		    rb_time_cmpxchg(&cpu_buffer->write_stamp,
				    info->after, ts)) {
			/* Nothing came after this event between C and E */
			info->delta = ts - info->after;
		} else {
			/*
			 * Interrupted between C and E:
			 * Lost the previous event's time stamp. Just set the
			 * delta to zero, and this will be the same time as
			 * the event this event interrupted. And the events that
			 * came after this will still be correct (as they would
			 * have built their delta on the previous event).
			 */
			info->delta = 0;
		}
		info->ts = ts;
		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
	}

	/*
	 * If this is the first commit on the page, then it has the same
	 * timestamp as the page itself.
	 */
	if (unlikely(!tail && !(info->add_timestamp &
				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
		info->delta = 0;

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	rb_update_event(cpu_buffer, event, info);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (unlikely(!tail))
		tail_page->page->time_stamp = info->ts;

	/* account for these added bytes */
	local_add(info->length, &cpu_buffer->entries_bytes);

	return event;
}

static __always_inline struct ring_buffer_event *
rb_reserve_next_event(struct trace_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	struct rb_event_info info;
	int nr_loops = 0;
	int add_ts_default;

	rb_start_commit(cpu_buffer);
	/* The commit page can not change after this */

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	info.length = rb_calculate_event_length(length);

	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
		info.length += RB_LEN_TIME_EXTEND;
	} else {
		add_ts_default = RB_ADD_STAMP_NONE;
	}

 again:
	info.add_timestamp = add_ts_default;
	info.delta = 0;

	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row.
If this happens 3756 * 1000 times in a row, there must be either an interrupt 3757 * storm or we have something buggy. 3758 * Bail! 3759 */ 3760 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3761 goto out_fail; 3762 3763 event = __rb_reserve_next(cpu_buffer, &info); 3764 3765 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3766 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3767 info.length -= RB_LEN_TIME_EXTEND; 3768 goto again; 3769 } 3770 3771 if (likely(event)) 3772 return event; 3773 out_fail: 3774 rb_end_commit(cpu_buffer); 3775 return NULL; 3776 } 3777 3778 /** 3779 * ring_buffer_lock_reserve - reserve a part of the buffer 3780 * @buffer: the ring buffer to reserve from 3781 * @length: the length of the data to reserve (excluding event header) 3782 * 3783 * Returns a reserved event on the ring buffer to copy directly to. 3784 * The user of this interface will need to get the body to write into 3785 * and can use the ring_buffer_event_data() interface. 3786 * 3787 * The length is the length of the data needed, not the event length 3788 * which also includes the event header. 3789 * 3790 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3791 * If NULL is returned, then nothing has been allocated or locked. 3792 */ 3793 struct ring_buffer_event * 3794 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3795 { 3796 struct ring_buffer_per_cpu *cpu_buffer; 3797 struct ring_buffer_event *event; 3798 int cpu; 3799 3800 /* If we are tracing schedule, we don't want to recurse */ 3801 preempt_disable_notrace(); 3802 3803 if (unlikely(atomic_read(&buffer->record_disabled))) 3804 goto out; 3805 3806 cpu = raw_smp_processor_id(); 3807 3808 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3809 goto out; 3810 3811 cpu_buffer = buffer->buffers[cpu]; 3812 3813 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3814 goto out; 3815 3816 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3817 goto out; 3818 3819 if (unlikely(trace_recursive_lock(cpu_buffer))) 3820 goto out; 3821 3822 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3823 if (!event) 3824 goto out_unlock; 3825 3826 return event; 3827 3828 out_unlock: 3829 trace_recursive_unlock(cpu_buffer); 3830 out: 3831 preempt_enable_notrace(); 3832 return NULL; 3833 } 3834 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3835 3836 /* 3837 * Decrement the entries to the page that an event is on. 3838 * The event does not even need to exist, only the pointer 3839 * to the page it is on. This may only be called before the commit 3840 * takes place. 3841 */ 3842 static inline void 3843 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3844 struct ring_buffer_event *event) 3845 { 3846 unsigned long addr = (unsigned long)event; 3847 struct buffer_page *bpage = cpu_buffer->commit_page; 3848 struct buffer_page *start; 3849 3850 addr &= PAGE_MASK; 3851 3852 /* Do the likely case first */ 3853 if (likely(bpage->page == (void *)addr)) { 3854 local_dec(&bpage->entries); 3855 return; 3856 } 3857 3858 /* 3859 * Because the commit page may be on the reader page we 3860 * start with the next page and check the end loop there. 3861 */ 3862 rb_inc_page(&bpage); 3863 start = bpage; 3864 do { 3865 if (bpage->page == (void *)addr) { 3866 local_dec(&bpage->entries); 3867 return; 3868 } 3869 rb_inc_page(&bpage); 3870 } while (bpage != start); 3871 3872 /* commit not part of this buffer?? 
*/ 3873 RB_WARN_ON(cpu_buffer, 1); 3874 } 3875 3876 /** 3877 * ring_buffer_discard_commit - discard an event that has not been committed 3878 * @buffer: the ring buffer 3879 * @event: non committed event to discard 3880 * 3881 * Sometimes an event that is in the ring buffer needs to be ignored. 3882 * This function lets the user discard an event in the ring buffer 3883 * and then that event will not be read later. 3884 * 3885 * This function only works if it is called before the item has been 3886 * committed. It will try to free the event from the ring buffer 3887 * if another event has not been added behind it. 3888 * 3889 * If another event has been added behind it, it will set the event 3890 * up as discarded, and perform the commit. 3891 * 3892 * If this function is called, do not call ring_buffer_unlock_commit on 3893 * the event. 3894 */ 3895 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3896 struct ring_buffer_event *event) 3897 { 3898 struct ring_buffer_per_cpu *cpu_buffer; 3899 int cpu; 3900 3901 /* The event is discarded regardless */ 3902 rb_event_discard(event); 3903 3904 cpu = smp_processor_id(); 3905 cpu_buffer = buffer->buffers[cpu]; 3906 3907 /* 3908 * This must only be called if the event has not been 3909 * committed yet. Thus we can assume that preemption 3910 * is still disabled. 3911 */ 3912 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3913 3914 rb_decrement_entry(cpu_buffer, event); 3915 if (rb_try_to_discard(cpu_buffer, event)) 3916 goto out; 3917 3918 out: 3919 rb_end_commit(cpu_buffer); 3920 3921 trace_recursive_unlock(cpu_buffer); 3922 3923 preempt_enable_notrace(); 3924 3925 } 3926 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3927 3928 /** 3929 * ring_buffer_write - write data to the buffer without reserving 3930 * @buffer: The ring buffer to write to. 3931 * @length: The length of the data being written (excluding the event header) 3932 * @data: The data to write to the buffer. 3933 * 3934 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3935 * one function. If you already have the data to write to the buffer, it 3936 * may be easier to simply call this function. 3937 * 3938 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3939 * and not the length of the event which would hold the header. 
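 *
 * A minimal sketch of the two equivalent approaches; my_data is a
 * hypothetical caller-defined payload, and the two-argument form of
 * ring_buffer_unlock_commit() is assumed:
 *
 *	struct my_data d = { ... };
 *	struct ring_buffer_event *event;
 *
 *	ring_buffer_write(buffer, sizeof(d), &d);
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(d));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &d, sizeof(d));
 *		ring_buffer_unlock_commit(buffer, event);
 *	}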
 */
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu;

	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	if (unlikely(trace_recursive_lock(cpu_buffer)))
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out_unlock;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer);

	rb_wakeups(buffer, cpu_buffer);

	ret = 0;

 out_unlock:
	trace_recursive_unlock(cpu_buffer);

 out:
	preempt_enable_notrace();

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);

static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return true;

	/* Reader should exhaust content in reader page */
	if (reader->read != rb_page_commit(reader))
		return false;

	/*
	 * If writers are committing on the reader page, knowing all
	 * committed content has been read, the ring buffer is empty.
	 */
	if (commit == reader)
		return true;

	/*
	 * If writers are committing on a page other than reader page
	 * and head page, there should always be content to read.
	 */
	if (commit != head)
		return false;

	/*
	 * Writers are committing on the head page, so we just need to
	 * check whether there is committed data; the reader will swap
	 * the reader page with the head page when it needs to read data.
	 */
	return rb_page_commit(commit) == 0;
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_rcu() after this.
 */
void ring_buffer_record_disable(struct trace_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct trace_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_off - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * This is different from ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
 */
void ring_buffer_record_off(struct trace_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	rd = atomic_read(&buffer->record_disabled);
	do {
		new_rd = rd | RB_BUFFER_OFF;
	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);

/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to.
 *
 * This enables all writes to the buffer that were disabled by
 * ring_buffer_record_off().
 *
 * This is different from ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
 * must be paired with a disable().
 */
void ring_buffer_record_on(struct trace_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	rd = atomic_read(&buffer->record_disabled);
	do {
		new_rd = rd & ~RB_BUFFER_OFF;
	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);

/**
 * ring_buffer_record_is_on - return true if the ring buffer can write
 * @buffer: The ring buffer to see if write is enabled
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
bool ring_buffer_record_is_on(struct trace_buffer *buffer)
{
	return !atomic_read(&buffer->record_disabled);
}

/**
 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
 * @buffer: The ring buffer to see if write is set enabled
 *
 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
 * Note that this does NOT mean it is in a writable state.
 *
 * It may return true when the ring buffer has been disabled by
 * ring_buffer_record_disable(), as that is a temporary disabling of
 * the ring buffer.
 */
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
{
	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_rcu() after this.
 */
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
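 *
 * A minimal sketch of the expected pairing (the disables nest, so each
 * disable needs a matching enable; synchronize_rcu() flushes writers
 * that were already in, per the note above):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_rcu();
 *	... inspect the now-quiescent per-CPU buffer ...
 *	ring_buffer_record_enable_cpu(buffer, cpu);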
4162 */ 4163 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4164 { 4165 struct ring_buffer_per_cpu *cpu_buffer; 4166 4167 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4168 return; 4169 4170 cpu_buffer = buffer->buffers[cpu]; 4171 atomic_dec(&cpu_buffer->record_disabled); 4172 } 4173 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4174 4175 /* 4176 * The total entries in the ring buffer is the running counter 4177 * of entries entered into the ring buffer, minus the sum of 4178 * the entries read from the ring buffer and the number of 4179 * entries that were overwritten. 4180 */ 4181 static inline unsigned long 4182 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4183 { 4184 return local_read(&cpu_buffer->entries) - 4185 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4186 } 4187 4188 /** 4189 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4190 * @buffer: The ring buffer 4191 * @cpu: The per CPU buffer to read from. 4192 */ 4193 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4194 { 4195 unsigned long flags; 4196 struct ring_buffer_per_cpu *cpu_buffer; 4197 struct buffer_page *bpage; 4198 u64 ret = 0; 4199 4200 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4201 return 0; 4202 4203 cpu_buffer = buffer->buffers[cpu]; 4204 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4205 /* 4206 * if the tail is on reader_page, oldest time stamp is on the reader 4207 * page 4208 */ 4209 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4210 bpage = cpu_buffer->reader_page; 4211 else 4212 bpage = rb_set_head_page(cpu_buffer); 4213 if (bpage) 4214 ret = bpage->page->time_stamp; 4215 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4216 4217 return ret; 4218 } 4219 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4220 4221 /** 4222 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer 4223 * @buffer: The ring buffer 4224 * @cpu: The per CPU buffer to read from. 4225 */ 4226 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4227 { 4228 struct ring_buffer_per_cpu *cpu_buffer; 4229 unsigned long ret; 4230 4231 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4232 return 0; 4233 4234 cpu_buffer = buffer->buffers[cpu]; 4235 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4236 4237 return ret; 4238 } 4239 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4240 4241 /** 4242 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4243 * @buffer: The ring buffer 4244 * @cpu: The per CPU buffer to get the entries from. 4245 */ 4246 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4247 { 4248 struct ring_buffer_per_cpu *cpu_buffer; 4249 4250 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4251 return 0; 4252 4253 cpu_buffer = buffer->buffers[cpu]; 4254 4255 return rb_num_of_entries(cpu_buffer); 4256 } 4257 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4258 4259 /** 4260 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4261 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
 * commits failing due to the buffer wrapping around while there are uncommitted
 * events, such as during an interrupt storm.
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of dropped events from
 */
unsigned long
ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->dropped_events);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);

/**
 * ring_buffer_read_events_cpu - get the number of events successfully read
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of events read
 */
unsigned long
ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->read;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += rb_num_of_entries(cpu_buffer);
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/*
if you care about this being correct, lock the buffer */ 4380 for_each_buffer_cpu(buffer, cpu) { 4381 cpu_buffer = buffer->buffers[cpu]; 4382 overruns += local_read(&cpu_buffer->overrun); 4383 } 4384 4385 return overruns; 4386 } 4387 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4388 4389 static void rb_iter_reset(struct ring_buffer_iter *iter) 4390 { 4391 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4392 4393 /* Iterator usage is expected to have record disabled */ 4394 iter->head_page = cpu_buffer->reader_page; 4395 iter->head = cpu_buffer->reader_page->read; 4396 iter->next_event = iter->head; 4397 4398 iter->cache_reader_page = iter->head_page; 4399 iter->cache_read = cpu_buffer->read; 4400 iter->cache_pages_removed = cpu_buffer->pages_removed; 4401 4402 if (iter->head) { 4403 iter->read_stamp = cpu_buffer->read_stamp; 4404 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4405 } else { 4406 iter->read_stamp = iter->head_page->page->time_stamp; 4407 iter->page_stamp = iter->read_stamp; 4408 } 4409 } 4410 4411 /** 4412 * ring_buffer_iter_reset - reset an iterator 4413 * @iter: The iterator to reset 4414 * 4415 * Resets the iterator, so that it will start from the beginning 4416 * again. 4417 */ 4418 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4419 { 4420 struct ring_buffer_per_cpu *cpu_buffer; 4421 unsigned long flags; 4422 4423 if (!iter) 4424 return; 4425 4426 cpu_buffer = iter->cpu_buffer; 4427 4428 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4429 rb_iter_reset(iter); 4430 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4431 } 4432 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4433 4434 /** 4435 * ring_buffer_iter_empty - check if an iterator has no more to read 4436 * @iter: The iterator to check 4437 */ 4438 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4439 { 4440 struct ring_buffer_per_cpu *cpu_buffer; 4441 struct buffer_page *reader; 4442 struct buffer_page *head_page; 4443 struct buffer_page *commit_page; 4444 struct buffer_page *curr_commit_page; 4445 unsigned commit; 4446 u64 curr_commit_ts; 4447 u64 commit_ts; 4448 4449 cpu_buffer = iter->cpu_buffer; 4450 reader = cpu_buffer->reader_page; 4451 head_page = cpu_buffer->head_page; 4452 commit_page = cpu_buffer->commit_page; 4453 commit_ts = commit_page->page->time_stamp; 4454 4455 /* 4456 * When the writer goes across pages, it issues a cmpxchg which 4457 * is a mb(), which will synchronize with the rmb here. 
4458 * (see rb_tail_page_update()) 4459 */ 4460 smp_rmb(); 4461 commit = rb_page_commit(commit_page); 4462 /* We want to make sure that the commit page doesn't change */ 4463 smp_rmb(); 4464 4465 /* Make sure commit page didn't change */ 4466 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4467 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4468 4469 /* If the commit page changed, then there's more data */ 4470 if (curr_commit_page != commit_page || 4471 curr_commit_ts != commit_ts) 4472 return 0; 4473 4474 /* Still racy, as it may return a false positive, but that's OK */ 4475 return ((iter->head_page == commit_page && iter->head >= commit) || 4476 (iter->head_page == reader && commit_page == head_page && 4477 head_page->read == commit && 4478 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4479 } 4480 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4481 4482 static void 4483 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4484 struct ring_buffer_event *event) 4485 { 4486 u64 delta; 4487 4488 switch (event->type_len) { 4489 case RINGBUF_TYPE_PADDING: 4490 return; 4491 4492 case RINGBUF_TYPE_TIME_EXTEND: 4493 delta = rb_event_time_stamp(event); 4494 cpu_buffer->read_stamp += delta; 4495 return; 4496 4497 case RINGBUF_TYPE_TIME_STAMP: 4498 delta = rb_event_time_stamp(event); 4499 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4500 cpu_buffer->read_stamp = delta; 4501 return; 4502 4503 case RINGBUF_TYPE_DATA: 4504 cpu_buffer->read_stamp += event->time_delta; 4505 return; 4506 4507 default: 4508 RB_WARN_ON(cpu_buffer, 1); 4509 } 4510 } 4511 4512 static void 4513 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4514 struct ring_buffer_event *event) 4515 { 4516 u64 delta; 4517 4518 switch (event->type_len) { 4519 case RINGBUF_TYPE_PADDING: 4520 return; 4521 4522 case RINGBUF_TYPE_TIME_EXTEND: 4523 delta = rb_event_time_stamp(event); 4524 iter->read_stamp += delta; 4525 return; 4526 4527 case RINGBUF_TYPE_TIME_STAMP: 4528 delta = rb_event_time_stamp(event); 4529 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4530 iter->read_stamp = delta; 4531 return; 4532 4533 case RINGBUF_TYPE_DATA: 4534 iter->read_stamp += event->time_delta; 4535 return; 4536 4537 default: 4538 RB_WARN_ON(iter->cpu_buffer, 1); 4539 } 4540 } 4541 4542 static struct buffer_page * 4543 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4544 { 4545 struct buffer_page *reader = NULL; 4546 unsigned long overwrite; 4547 unsigned long flags; 4548 int nr_loops = 0; 4549 bool ret; 4550 4551 local_irq_save(flags); 4552 arch_spin_lock(&cpu_buffer->lock); 4553 4554 again: 4555 /* 4556 * This should normally only loop twice. But because the 4557 * start of the reader inserts an empty page, it causes 4558 * a case where we will loop three times. There should be no 4559 * reason to loop four times (that I know of). 
4560 */ 4561 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 4562 reader = NULL; 4563 goto out; 4564 } 4565 4566 reader = cpu_buffer->reader_page; 4567 4568 /* If there's more to read, return this page */ 4569 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 4570 goto out; 4571 4572 /* Never should we have an index greater than the size */ 4573 if (RB_WARN_ON(cpu_buffer, 4574 cpu_buffer->reader_page->read > rb_page_size(reader))) 4575 goto out; 4576 4577 /* check if we caught up to the tail */ 4578 reader = NULL; 4579 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 4580 goto out; 4581 4582 /* Don't bother swapping if the ring buffer is empty */ 4583 if (rb_num_of_entries(cpu_buffer) == 0) 4584 goto out; 4585 4586 /* 4587 * Reset the reader page to size zero. 4588 */ 4589 local_set(&cpu_buffer->reader_page->write, 0); 4590 local_set(&cpu_buffer->reader_page->entries, 0); 4591 local_set(&cpu_buffer->reader_page->page->commit, 0); 4592 cpu_buffer->reader_page->real_end = 0; 4593 4594 spin: 4595 /* 4596 * Splice the empty reader page into the list around the head. 4597 */ 4598 reader = rb_set_head_page(cpu_buffer); 4599 if (!reader) 4600 goto out; 4601 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 4602 cpu_buffer->reader_page->list.prev = reader->list.prev; 4603 4604 /* 4605 * cpu_buffer->pages just needs to point to the buffer, it 4606 * has no specific buffer page to point to. Lets move it out 4607 * of our way so we don't accidentally swap it. 4608 */ 4609 cpu_buffer->pages = reader->list.prev; 4610 4611 /* The reader page will be pointing to the new head */ 4612 rb_set_list_to_head(&cpu_buffer->reader_page->list); 4613 4614 /* 4615 * We want to make sure we read the overruns after we set up our 4616 * pointers to the next object. The writer side does a 4617 * cmpxchg to cross pages which acts as the mb on the writer 4618 * side. Note, the reader will constantly fail the swap 4619 * while the writer is updating the pointers, so this 4620 * guarantees that the overwrite recorded here is the one we 4621 * want to compare with the last_overrun. 4622 */ 4623 smp_mb(); 4624 overwrite = local_read(&(cpu_buffer->overrun)); 4625 4626 /* 4627 * Here's the tricky part. 4628 * 4629 * We need to move the pointer past the header page. 4630 * But we can only do that if a writer is not currently 4631 * moving it. The page before the header page has the 4632 * flag bit '1' set if it is pointing to the page we want. 4633 * but if the writer is in the process of moving it 4634 * than it will be '2' or already moved '0'. 4635 */ 4636 4637 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 4638 4639 /* 4640 * If we did not convert it, then we must try again. 4641 */ 4642 if (!ret) 4643 goto spin; 4644 4645 /* 4646 * Yay! We succeeded in replacing the page. 4647 * 4648 * Now make the new head point back to the reader page. 
4649 */ 4650 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4651 rb_inc_page(&cpu_buffer->head_page); 4652 4653 local_inc(&cpu_buffer->pages_read); 4654 4655 /* Finally update the reader page to the new head */ 4656 cpu_buffer->reader_page = reader; 4657 cpu_buffer->reader_page->read = 0; 4658 4659 if (overwrite != cpu_buffer->last_overrun) { 4660 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4661 cpu_buffer->last_overrun = overwrite; 4662 } 4663 4664 goto again; 4665 4666 out: 4667 /* Update the read_stamp on the first event */ 4668 if (reader && reader->read == 0) 4669 cpu_buffer->read_stamp = reader->page->time_stamp; 4670 4671 arch_spin_unlock(&cpu_buffer->lock); 4672 local_irq_restore(flags); 4673 4674 /* 4675 * The writer has preempt disable, wait for it. But not forever 4676 * Although, 1 second is pretty much "forever" 4677 */ 4678 #define USECS_WAIT 1000000 4679 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4680 /* If the write is past the end of page, a writer is still updating it */ 4681 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4682 break; 4683 4684 udelay(1); 4685 4686 /* Get the latest version of the reader write value */ 4687 smp_rmb(); 4688 } 4689 4690 /* The writer is not moving forward? Something is wrong */ 4691 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4692 reader = NULL; 4693 4694 /* 4695 * Make sure we see any padding after the write update 4696 * (see rb_reset_tail()). 4697 * 4698 * In addition, a writer may be writing on the reader page 4699 * if the page has not been fully filled, so the read barrier 4700 * is also needed to make sure we see the content of what is 4701 * committed by the writer (see rb_set_commit_to_write()). 4702 */ 4703 smp_rmb(); 4704 4705 4706 return reader; 4707 } 4708 4709 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4710 { 4711 struct ring_buffer_event *event; 4712 struct buffer_page *reader; 4713 unsigned length; 4714 4715 reader = rb_get_reader_page(cpu_buffer); 4716 4717 /* This function should not be called when buffer is empty */ 4718 if (RB_WARN_ON(cpu_buffer, !reader)) 4719 return; 4720 4721 event = rb_reader_event(cpu_buffer); 4722 4723 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4724 cpu_buffer->read++; 4725 4726 rb_update_read_stamp(cpu_buffer, event); 4727 4728 length = rb_event_length(event); 4729 cpu_buffer->reader_page->read += length; 4730 cpu_buffer->read_bytes += length; 4731 } 4732 4733 static void rb_advance_iter(struct ring_buffer_iter *iter) 4734 { 4735 struct ring_buffer_per_cpu *cpu_buffer; 4736 4737 cpu_buffer = iter->cpu_buffer; 4738 4739 /* If head == next_event then we need to jump to the next event */ 4740 if (iter->head == iter->next_event) { 4741 /* If the event gets overwritten again, there's nothing to do */ 4742 if (rb_iter_head_event(iter) == NULL) 4743 return; 4744 } 4745 4746 iter->head = iter->next_event; 4747 4748 /* 4749 * Check if we are at the end of the buffer. 
4750 */ 4751 if (iter->next_event >= rb_page_size(iter->head_page)) { 4752 /* discarded commits can make the page empty */ 4753 if (iter->head_page == cpu_buffer->commit_page) 4754 return; 4755 rb_inc_iter(iter); 4756 return; 4757 } 4758 4759 rb_update_iter_read_stamp(iter, iter->event); 4760 } 4761 4762 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4763 { 4764 return cpu_buffer->lost_events; 4765 } 4766 4767 static struct ring_buffer_event * 4768 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4769 unsigned long *lost_events) 4770 { 4771 struct ring_buffer_event *event; 4772 struct buffer_page *reader; 4773 int nr_loops = 0; 4774 4775 if (ts) 4776 *ts = 0; 4777 again: 4778 /* 4779 * We repeat when a time extend is encountered. 4780 * Since the time extend is always attached to a data event, 4781 * we should never loop more than once. 4782 * (We never hit the following condition more than twice). 4783 */ 4784 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4785 return NULL; 4786 4787 reader = rb_get_reader_page(cpu_buffer); 4788 if (!reader) 4789 return NULL; 4790 4791 event = rb_reader_event(cpu_buffer); 4792 4793 switch (event->type_len) { 4794 case RINGBUF_TYPE_PADDING: 4795 if (rb_null_event(event)) 4796 RB_WARN_ON(cpu_buffer, 1); 4797 /* 4798 * Because the writer could be discarding every 4799 * event it creates (which would probably be bad) 4800 * if we were to go back to "again" then we may never 4801 * catch up, and will trigger the warn on, or lock 4802 * the box. Return the padding, and we will release 4803 * the current locks, and try again. 4804 */ 4805 return event; 4806 4807 case RINGBUF_TYPE_TIME_EXTEND: 4808 /* Internal data, OK to advance */ 4809 rb_advance_reader(cpu_buffer); 4810 goto again; 4811 4812 case RINGBUF_TYPE_TIME_STAMP: 4813 if (ts) { 4814 *ts = rb_event_time_stamp(event); 4815 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4816 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4817 cpu_buffer->cpu, ts); 4818 } 4819 /* Internal data, OK to advance */ 4820 rb_advance_reader(cpu_buffer); 4821 goto again; 4822 4823 case RINGBUF_TYPE_DATA: 4824 if (ts && !(*ts)) { 4825 *ts = cpu_buffer->read_stamp + event->time_delta; 4826 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4827 cpu_buffer->cpu, ts); 4828 } 4829 if (lost_events) 4830 *lost_events = rb_lost_events(cpu_buffer); 4831 return event; 4832 4833 default: 4834 RB_WARN_ON(cpu_buffer, 1); 4835 } 4836 4837 return NULL; 4838 } 4839 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4840 4841 static struct ring_buffer_event * 4842 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4843 { 4844 struct trace_buffer *buffer; 4845 struct ring_buffer_per_cpu *cpu_buffer; 4846 struct ring_buffer_event *event; 4847 int nr_loops = 0; 4848 4849 if (ts) 4850 *ts = 0; 4851 4852 cpu_buffer = iter->cpu_buffer; 4853 buffer = cpu_buffer->buffer; 4854 4855 /* 4856 * Check if someone performed a consuming read to the buffer 4857 * or removed some pages from the buffer. In these cases, 4858 * iterator was invalidated and we need to reset it. 4859 */ 4860 if (unlikely(iter->cache_read != cpu_buffer->read || 4861 iter->cache_reader_page != cpu_buffer->reader_page || 4862 iter->cache_pages_removed != cpu_buffer->pages_removed)) 4863 rb_iter_reset(iter); 4864 4865 again: 4866 if (ring_buffer_iter_empty(iter)) 4867 return NULL; 4868 4869 /* 4870 * As the writer can mess with what the iterator is trying 4871 * to read, just give up if we fail to get an event after 4872 * three tries. 
The iterator is not as reliable when reading
	 * the ring buffer with an active write as the consumer is.
	 * Do not warn if the three-failure limit is reached.
	 */
	if (++nr_loops > 3)
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= rb_page_size(iter->head_page)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);
	if (!event)
		goto again;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		if (ts) {
			*ts = rb_event_time_stamp(event);
			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts && !(*ts)) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		RB_WARN_ON(cpu_buffer, 1);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
	if (likely(!in_nmi())) {
		raw_spin_lock(&cpu_buffer->reader_lock);
		return true;
	}

	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * trylock must be used to prevent a deadlock if the NMI
	 * preempted a task that holds the ring buffer locks. If
	 * we get the lock then all is fine, if not, then continue
	 * to do the read, but this can corrupt the ring buffer,
	 * so it must be permanently disabled from future writes.
	 * Reading from NMI is a one-shot deal.
	 */
	if (raw_spin_trylock(&cpu_buffer->reader_lock))
		return true;

	/* Continue without locking, but disable the ring buffer */
	atomic_inc(&cpu_buffer->record_disabled);
	return false;
}

static inline void
rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
{
	if (likely(locked))
		raw_spin_unlock(&cpu_buffer->reader_lock);
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The CPU to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
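 *
 * A minimal polling sketch (a NULL return only means there is nothing
 * to read right now):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next ts=%llu lost=%lu\n",
 *			(unsigned long long)ts, lost);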
 */
struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	bool dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

 again:
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_dropped - report if there are dropped events
 * @iter: The ring buffer iterator
 *
 * Returns true if there were dropped events since the last peek.
 */
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
{
	bool ret = iter->missed_events != 0;

	iter->missed_events = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
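 *
 * A minimal drain-loop sketch; handle_event() is a hypothetical,
 * caller-supplied helper:
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		handle_event(ring_buffer_event_data(event), ts, lost);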
 */
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	bool dolock;

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);

/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 * @flags: gfp flags to use for memory allocation
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kzalloc(sizeof(*iter), flags);
	if (!iter)
		return NULL;

	iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
	if (!iter->event) {
		kfree(iter);
		return NULL;
	}

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->resize_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
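 *
 * The whole sequence, as a sketch (error handling elided; note that
 * ring_buffer_read_prepare() can return NULL):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);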
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	/*
	 * Ring buffer is disabled from recording, here's a good place
	 * to check the integrity of the ring buffer.
	 * Must prevent readers from trying to read, as the check
	 * clears the HEAD page and readers require it.
	 */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_check_pages(cpu_buffer);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->resize_disabled);
	kfree(iter->event);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_iter_advance - advance the iterator to the next location
 * @iter: The ring buffer iterator
 *
 * Move the location of the iterator such that the next read will
 * be the next location of the iterator.
 */
void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	rb_advance_iter(iter);

	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
 */
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
	/*
	 * Earlier, this method returned
	 *	BUF_PAGE_SIZE * buffer->nr_pages.
	 * Since the nr_pages field is now removed, we have converted this to
	 * return the per cpu buffer value.
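	 *
	 * A caller that wants the old total can sum the per-CPU sizes
	 * itself, e.g. (sketch):
	 *
	 *	for_each_online_cpu(cpu)
	 *		total += ring_buffer_size(buffer, cpu);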
5245 */ 5246 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5247 return 0; 5248 5249 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5250 } 5251 EXPORT_SYMBOL_GPL(ring_buffer_size); 5252 5253 static void rb_clear_buffer_page(struct buffer_page *page) 5254 { 5255 local_set(&page->write, 0); 5256 local_set(&page->entries, 0); 5257 rb_init_page(page->page); 5258 page->read = 0; 5259 } 5260 5261 static void 5262 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5263 { 5264 struct buffer_page *page; 5265 5266 rb_head_page_deactivate(cpu_buffer); 5267 5268 cpu_buffer->head_page 5269 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5270 rb_clear_buffer_page(cpu_buffer->head_page); 5271 list_for_each_entry(page, cpu_buffer->pages, list) { 5272 rb_clear_buffer_page(page); 5273 } 5274 5275 cpu_buffer->tail_page = cpu_buffer->head_page; 5276 cpu_buffer->commit_page = cpu_buffer->head_page; 5277 5278 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5279 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5280 rb_clear_buffer_page(cpu_buffer->reader_page); 5281 5282 local_set(&cpu_buffer->entries_bytes, 0); 5283 local_set(&cpu_buffer->overrun, 0); 5284 local_set(&cpu_buffer->commit_overrun, 0); 5285 local_set(&cpu_buffer->dropped_events, 0); 5286 local_set(&cpu_buffer->entries, 0); 5287 local_set(&cpu_buffer->committing, 0); 5288 local_set(&cpu_buffer->commits, 0); 5289 local_set(&cpu_buffer->pages_touched, 0); 5290 local_set(&cpu_buffer->pages_lost, 0); 5291 local_set(&cpu_buffer->pages_read, 0); 5292 cpu_buffer->last_pages_touch = 0; 5293 cpu_buffer->shortest_full = 0; 5294 cpu_buffer->read = 0; 5295 cpu_buffer->read_bytes = 0; 5296 5297 rb_time_set(&cpu_buffer->write_stamp, 0); 5298 rb_time_set(&cpu_buffer->before_stamp, 0); 5299 5300 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5301 5302 cpu_buffer->lost_events = 0; 5303 cpu_buffer->last_overrun = 0; 5304 5305 rb_head_page_activate(cpu_buffer); 5306 cpu_buffer->pages_removed = 0; 5307 } 5308 5309 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5310 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5311 { 5312 unsigned long flags; 5313 5314 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5315 5316 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5317 goto out; 5318 5319 arch_spin_lock(&cpu_buffer->lock); 5320 5321 rb_reset_cpu(cpu_buffer); 5322 5323 arch_spin_unlock(&cpu_buffer->lock); 5324 5325 out: 5326 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5327 } 5328 5329 /** 5330 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5331 * @buffer: The ring buffer to reset a per cpu buffer of 5332 * @cpu: The CPU buffer to be reset 5333 */ 5334 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5335 { 5336 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5337 5338 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5339 return; 5340 5341 /* prevent another thread from changing buffer sizes */ 5342 mutex_lock(&buffer->mutex); 5343 5344 atomic_inc(&cpu_buffer->resize_disabled); 5345 atomic_inc(&cpu_buffer->record_disabled); 5346 5347 /* Make sure all commits have finished */ 5348 synchronize_rcu(); 5349 5350 reset_disabled_cpu_buffer(cpu_buffer); 5351 5352 atomic_dec(&cpu_buffer->record_disabled); 5353 atomic_dec(&cpu_buffer->resize_disabled); 5354 5355 mutex_unlock(&buffer->mutex); 5356 } 5357 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5358 5359 /* Flag to ensure proper resetting of atomic variables */ 5360 
#define RESET_BIT (1 << 30) 5361 5362 /** 5363 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5364 * @buffer: The ring buffer to reset a per cpu buffer of 5365 */ 5366 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5367 { 5368 struct ring_buffer_per_cpu *cpu_buffer; 5369 int cpu; 5370 5371 /* prevent another thread from changing buffer sizes */ 5372 mutex_lock(&buffer->mutex); 5373 5374 for_each_online_buffer_cpu(buffer, cpu) { 5375 cpu_buffer = buffer->buffers[cpu]; 5376 5377 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); 5378 atomic_inc(&cpu_buffer->record_disabled); 5379 } 5380 5381 /* Make sure all commits have finished */ 5382 synchronize_rcu(); 5383 5384 for_each_buffer_cpu(buffer, cpu) { 5385 cpu_buffer = buffer->buffers[cpu]; 5386 5387 /* 5388 * If a CPU came online during the synchronize_rcu(), then 5389 * ignore it. 5390 */ 5391 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) 5392 continue; 5393 5394 reset_disabled_cpu_buffer(cpu_buffer); 5395 5396 atomic_dec(&cpu_buffer->record_disabled); 5397 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); 5398 } 5399 5400 mutex_unlock(&buffer->mutex); 5401 } 5402 5403 /** 5404 * ring_buffer_reset - reset a ring buffer 5405 * @buffer: The ring buffer to reset all cpu buffers 5406 */ 5407 void ring_buffer_reset(struct trace_buffer *buffer) 5408 { 5409 struct ring_buffer_per_cpu *cpu_buffer; 5410 int cpu; 5411 5412 /* prevent another thread from changing buffer sizes */ 5413 mutex_lock(&buffer->mutex); 5414 5415 for_each_buffer_cpu(buffer, cpu) { 5416 cpu_buffer = buffer->buffers[cpu]; 5417 5418 atomic_inc(&cpu_buffer->resize_disabled); 5419 atomic_inc(&cpu_buffer->record_disabled); 5420 } 5421 5422 /* Make sure all commits have finished */ 5423 synchronize_rcu(); 5424 5425 for_each_buffer_cpu(buffer, cpu) { 5426 cpu_buffer = buffer->buffers[cpu]; 5427 5428 reset_disabled_cpu_buffer(cpu_buffer); 5429 5430 atomic_dec(&cpu_buffer->record_disabled); 5431 atomic_dec(&cpu_buffer->resize_disabled); 5432 } 5433 5434 mutex_unlock(&buffer->mutex); 5435 } 5436 EXPORT_SYMBOL_GPL(ring_buffer_reset); 5437 5438 /** 5439 * ring_buffer_empty - is the ring buffer empty? 5440 * @buffer: The ring buffer to test 5441 */ 5442 bool ring_buffer_empty(struct trace_buffer *buffer) 5443 { 5444 struct ring_buffer_per_cpu *cpu_buffer; 5445 unsigned long flags; 5446 bool dolock; 5447 bool ret; 5448 int cpu; 5449 5450 /* yes this is racy, but if you don't like the race, lock the buffer */ 5451 for_each_buffer_cpu(buffer, cpu) { 5452 cpu_buffer = buffer->buffers[cpu]; 5453 local_irq_save(flags); 5454 dolock = rb_reader_lock(cpu_buffer); 5455 ret = rb_per_cpu_empty(cpu_buffer); 5456 rb_reader_unlock(cpu_buffer, dolock); 5457 local_irq_restore(flags); 5458 5459 if (!ret) 5460 return false; 5461 } 5462 5463 return true; 5464 } 5465 EXPORT_SYMBOL_GPL(ring_buffer_empty); 5466 5467 /** 5468 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	bool dolock;
	bool ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return true;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	ret = rb_per_cpu_empty(cpu_buffer);
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/* At least make sure the two buffers are somewhat the same */
	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
		goto out;

	ret = -EAGAIN;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_rcu here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	/*
	 * When resize is in progress, we cannot swap it because
	 * it will mess up the state of the cpu buffer.
	 */
	if (atomic_read(&buffer_a->resizing))
		goto out_dec;
	if (atomic_read(&buffer_b->resizing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or ERR_PTR
 */
void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_data_page *bpage = NULL;
	unsigned long flags;
	struct page *page;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return ERR_PTR(-ENODEV);

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

	if (cpu_buffer->free_page) {
		bpage = cpu_buffer->free_page;
		cpu_buffer->free_page = NULL;
	}

	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	if (bpage)
		goto out;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return ERR_PTR(-ENOMEM);

	bpage = page_address(page);

 out:
	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @cpu: the cpu buffer the page came from
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_data_page *bpage = data;
	struct page *page = virt_to_page(bpage);
	unsigned long flags;

	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
		return;

	cpu_buffer = buffer->buffers[cpu];

	/* If the page is still in use someplace else, we can't reuse it */
	if (page_ref_count(page) > 1)
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

	if (!cpu_buffer->free_page) {
		cpu_buffer->free_page = bpage;
		bpage = NULL;
	}

	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

 out:
	free_page((unsigned long)bpage);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
5685 * 5686 * for example: 5687 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5688 * if (IS_ERR(rpage)) 5689 * return PTR_ERR(rpage); 5690 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5691 * if (ret >= 0) 5692 * process_page(rpage, ret); 5693 * 5694 * When @full is set, the function will not return true unless 5695 * the writer is off the reader page. 5696 * 5697 * Note: it is up to the calling functions to handle sleeps and wakeups. 5698 * The ring buffer can be used anywhere in the kernel and can not 5699 * blindly call wake_up. The layer that uses the ring buffer must be 5700 * responsible for that. 5701 * 5702 * Returns: 5703 * >=0 if data has been transferred, returns the offset of consumed data. 5704 * <0 if no data has been transferred. 5705 */ 5706 int ring_buffer_read_page(struct trace_buffer *buffer, 5707 void **data_page, size_t len, int cpu, int full) 5708 { 5709 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5710 struct ring_buffer_event *event; 5711 struct buffer_data_page *bpage; 5712 struct buffer_page *reader; 5713 unsigned long missed_events; 5714 unsigned long flags; 5715 unsigned int commit; 5716 unsigned int read; 5717 u64 save_timestamp; 5718 int ret = -1; 5719 5720 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5721 goto out; 5722 5723 /* 5724 * If len is not big enough to hold the page header, then 5725 * we can not copy anything. 5726 */ 5727 if (len <= BUF_PAGE_HDR_SIZE) 5728 goto out; 5729 5730 len -= BUF_PAGE_HDR_SIZE; 5731 5732 if (!data_page) 5733 goto out; 5734 5735 bpage = *data_page; 5736 if (!bpage) 5737 goto out; 5738 5739 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5740 5741 reader = rb_get_reader_page(cpu_buffer); 5742 if (!reader) 5743 goto out_unlock; 5744 5745 event = rb_reader_event(cpu_buffer); 5746 5747 read = reader->read; 5748 commit = rb_page_commit(reader); 5749 5750 /* Check if any events were dropped */ 5751 missed_events = cpu_buffer->lost_events; 5752 5753 /* 5754 * If this page has been partially read or 5755 * if len is not big enough to read the rest of the page or 5756 * a writer is still on the page, then 5757 * we must copy the data from the page to the buffer. 5758 * Otherwise, we can simply swap the page with the one passed in. 5759 */ 5760 if (read || (len < (commit - read)) || 5761 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5762 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5763 unsigned int rpos = read; 5764 unsigned int pos = 0; 5765 unsigned int size; 5766 5767 /* 5768 * If a full page is expected, this can still be returned 5769 * if there's been a previous partial read and the 5770 * rest of the page can be read and the commit page is off 5771 * the reader page. 5772 */ 5773 if (full && 5774 (!read || (len < (commit - read)) || 5775 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5776 goto out_unlock; 5777 5778 if (len > (commit - read)) 5779 len = (commit - read); 5780 5781 /* Always keep the time extend and data together */ 5782 size = rb_event_ts_length(event); 5783 5784 if (len < size) 5785 goto out_unlock; 5786 5787 /* save the current timestamp, since the user will need it */ 5788 save_timestamp = cpu_buffer->read_stamp; 5789 5790 /* Need to copy one event at a time */ 5791 do { 5792 /* We need the size of one event, because 5793 * rb_advance_reader only advances by one event, 5794 * whereas rb_event_ts_length may include the size of 5795 * one or two events. 
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += rb_page_commit(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size.
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/*
		 * If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
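
/*
 * A sketch of how a consumer of ring_buffer_read_page() can decode the
 * flags folded into the commit field above; "dpage" and "missed" are
 * illustrative names and "rpage" is the page handed back to the caller:
 *
 *	struct buffer_data_page *dpage = rpage;
 *	unsigned long commit = local_read(&dpage->commit);
 *	unsigned long size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *	unsigned long missed = 0;
 *
 *	if ((commit & RB_MISSED_EVENTS) && (commit & RB_MISSED_STORED))
 *		memcpy(&missed, &dpage->data[size], sizeof(missed));
 *
 * RB_MISSED_EVENTS alone means events were lost but there was no room
 * to store the count; with RB_MISSED_STORED the count sits in the last
 * sizeof(missed_events) bytes of the data, as stored above.
 */
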
/*
 * We only allocate new buffers, never free them if the CPU goes down.
 * If we were to free the buffer, then the user would lose any trace that was in
 * the buffer.
 */
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct trace_buffer *buffer;
	long nr_pages_same;
	int cpu_i;
	unsigned long nr_pages;

	buffer = container_of(node, struct trace_buffer, node);
	if (cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	nr_pages = 0;
	nr_pages_same = 1;
	/* check if all cpu sizes are same */
	for_each_buffer_cpu(buffer, cpu_i) {
		/* fill in the size from first enabled cpu */
		if (nr_pages == 0)
			nr_pages = buffer->buffers[cpu_i]->nr_pages;
		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
			nr_pages_same = 0;
			break;
		}
	}
	/* allocate minimum pages, user can later expand it */
	if (!nr_pages_same)
		nr_pages = 2;
	buffer->buffers[cpu] =
		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu]) {
		WARN(1, "failed to allocate ring buffer on CPU %u\n",
		     cpu);
		return -ENOMEM;
	}
	smp_wmb();
	cpumask_set_cpu(cpu, buffer->cpumask);
	return 0;
}
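
/*
 * A sketch of how the callback above gets wired up. The tracing core
 * does the equivalent of the following when it registers the hotplug
 * state (error handling omitted; the state name is illustrative):
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				"trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *
 * and each ring buffer then adds itself as an instance:
 *
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 */
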
#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct trace_buffer *buffer;
	unsigned long events;
	unsigned long bytes_written;
	unsigned long bytes_alloc;
	unsigned long bytes_dropped;
	unsigned long events_nested;
	unsigned long bytes_written_nested;
	unsigned long bytes_alloc_nested;
	unsigned long bytes_dropped_nested;
	int min_size_nested;
	int max_size_nested;
	int max_size;
	int min_size;
	int cpu;
	int cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};

static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes different than what is written */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e (68 / 25 ~= 2.72), to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer);

	return 0;
}

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {

		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non-preempt, let others run */
		schedule();
	}

	return 0;
}

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct trace_buffer *buffer;
	int cpu;
	int ret = 0;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
		return 0;
	}

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
						     cpu, "rbtester/%u");
		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
			pr_cont("FAILED\n");
			ret = PTR_ERR(rb_threads[cpu]);
			goto out_free;
		}
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(IS_ERR(rb_hammer))) {
		pr_cont("FAILED\n");
		ret = PTR_ERR(rb_hammer);
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show buffer is enabled before setting rb_test_started.
	 * Yes there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

 out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	/* Report! */
	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info("              events:    %ld\n", total_events);
		pr_info("       dropped bytes:    %ld\n", total_dropped);
		pr_info("       alloced bytes:    %ld\n", total_alloc);
		pr_info("       written bytes:    %ld\n", total_written);
		pr_info("       biggest event:    %d\n", big_event_size);
		pr_info("      smallest event:    %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected:   %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info("         read events:   %ld\n", total_read);
		pr_info("         lost events:   %ld\n", total_lost);
		pr_info("        total events:   %ld\n", total_lost + total_read);
		pr_info("  recorded len bytes:   %ld\n", total_len);
		pr_info(" recorded size bytes:   %ld\n", total_size);
		if (total_lost) {
			pr_info(" With dropped events, record len and size may not match\n"
				" alloced and written from above\n");
		} else {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */