// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len : 5 bits\n");
	trace_seq_puts(s, "\ttime_delta : 27 bits\n");
	trace_seq_puts(s, "\tarray : 32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
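/*
 * Illustrative sketch (editor's addition, not part of the kernel build):
 * how the compressed entry header above encodes the payload length.  A data
 * event whose payload fits in RB_MAX_SMALL_DATA stores length/4 in the
 * 5 bit type_len field; larger payloads set type_len to 0 and put the
 * length in array[0].  A user-space approximation, assuming
 * RINGBUF_TYPE_DATA_TYPE_LEN_MAX == 28 as in the mainline headers:
 *
 *	#include <assert.h>
 *
 *	#define RB_ALIGNMENT	4U
 *	#define RB_MAX_SMALL	(4U * 28)
 *
 *	// Value that would land in type_len (0 means "length is in array[0]").
 *	static unsigned encode_type_len(unsigned payload)
 *	{
 *		unsigned aligned = (payload + RB_ALIGNMENT - 1) & ~(RB_ALIGNMENT - 1);
 *
 *		return aligned <= RB_MAX_SMALL ? aligned / RB_ALIGNMENT : 0;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(encode_type_len(12) == 3);	// small payload: 12 / 4
 *		assert(encode_type_len(200) == 0);	// large payload: array[0]
 *		return 0;
 *	}
 */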
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)		\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	long				wait_index;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 still uses local64_t).
 *
 * For the ring buffer, the 64 bit operations required for the time stamp
 * are the following:
 *
 *  - Reads may fail if they interrupt a modification of the time stamp.
 *      A read will succeed if it did not interrupt another write, even if
 *      the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 *  (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half holds a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 *  top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	 60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}
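/*
 * Illustrative sketch (editor's addition, not part of the kernel build):
 * the split time stamp scheme above behaves like a tiny sequence lock.
 * Each 30 bit half carries a 2 bit update counter in its top bits, and a
 * read is only valid when both halves carry the same counter.  A
 * single-threaded user-space model of the encoding (names are made up):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define SHIFT		30
 *	#define VAL_MASK	((1UL << SHIFT) - 1)
 *
 *	static unsigned long pack(unsigned long val, unsigned long cnt)
 *	{
 *		return (val & VAL_MASK) | ((cnt & 3) << SHIFT);
 *	}
 *
 *	static int unpack(unsigned long top, unsigned long bottom, uint64_t *ts)
 *	{
 *		if (((top >> SHIFT) & 3) != ((bottom >> SHIFT) & 3))
 *			return 0;	// counters disagree: torn update
 *		*ts = ((uint64_t)(top & VAL_MASK) << SHIFT) | (bottom & VAL_MASK);
 *		return 1;
 *	}
 *
 *	int main(void)
 *	{
 *		uint64_t ts = 0x123456789abULL, out;
 *		unsigned long top = pack(ts >> SHIFT, 2);
 *		unsigned long bottom = pack(ts & VAL_MASK, 2);
 *
 *		assert(unpack(top, bottom, &out) && out == ts);
 *		return 0;
 *	}
 */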
static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	return local_try_cmpxchg(l, &expect, set);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	return local64_try_cmpxchg(&t->time, &expect, set);
}
#endif

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise the current time is returned, though neither of the
 * last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}
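/*
 * Worked example (editor's addition): with nr_pages = 8 and a waiter asking
 * for full = 50 (percent), full_hit() fires once dirty * 100 > 50 * 8, i.e.
 * once at least 5 pages hold data: 4 * 100 = 400 is not greater than 400,
 * while 5 * 100 = 500 is.  The strict ">" means "more than the requested
 * percentage", not "at least".
 */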
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on this.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			bool done;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);

			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
	} else {
		poll_wait(filp, &work->waiters, poll_table);
		work->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next		bit 1	  bit 0
 *				-------		-------
 * Normal page			0		0
 * Points to head page		0		1
 * New head page		1		0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
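/*
 * Illustrative sketch (editor's addition, not part of the kernel build):
 * because buffer pages are cache line aligned, the two low bits of the
 * ->next pointer are free to carry the HEAD/UPDATE state from the table
 * above.  A stand-alone user-space model of the tagging (names are made up):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define FLAG_MASK	3UL
 *	#define FLAG_HEAD	1UL
 *
 *	static uintptr_t tag(void *page, unsigned long flag)
 *	{
 *		return (uintptr_t)page | flag;
 *	}
 *
 *	static void *untag(uintptr_t val)
 *	{
 *		return (void *)(val & ~FLAG_MASK);
 *	}
 *
 *	int main(void)
 *	{
 *		_Alignas(64) char page[64];	// cache line aligned, low bits zero
 *		uintptr_t next = tag(page, FLAG_HEAD);
 *
 *		assert((next & FLAG_MASK) == FLAG_HEAD);	// flag readable
 *		assert(untag(next) == (void *)page);		// pointer recoverable
 *		return 0;
 *	}
 */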
/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static bool rb_head_page_replace(struct buffer_page *old,
				 struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			   struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return;
	}
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
		long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * A user thread may allocate too much even though si_mem_available()
	 * reported that there was enough memory, when in fact there was not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	irq_work_sync(&cpu_buffer->irq_work.work);

	free_buffer_page(cpu_buffer->reader_page);

	if (head) {
		rb_head_page_deactivate(cpu_buffer);

		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}
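/*
 * Worked example (editor's addition): __ring_buffer_alloc() below sizes the
 * buffer in pages, not bytes.  On a 4K-page 64 bit machine, BUF_PAGE_SIZE is
 * PAGE_SIZE minus the 16 byte buffer_data_page header, i.e. 4080 bytes, so a
 * request for 1 MB per CPU becomes DIV_ROUND_UP(1048576, 4080) = 258 pages
 * (and never fewer than the 2 page minimum).  Callers normally go through
 * the ring_buffer_alloc() wrapper macro, which supplies the lock class key.
 */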
/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	irq_work_sync(&buffer->irq_work.work);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static bool
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}
	/* Read iterators need to reset themselves when some pages removed */
	cpu_buffer->pages_removed += nr_removed;

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
1963 * Make sure that we have head_bit value preserved for the 1964 * next page 1965 */ 1966 tail_page->next = (struct list_head *)((unsigned long)next_page | 1967 head_bit); 1968 next_page = rb_list_head(next_page); 1969 next_page->prev = tail_page; 1970 1971 /* make sure pages points to a valid page in the ring buffer */ 1972 cpu_buffer->pages = next_page; 1973 1974 /* update head page */ 1975 if (head_bit) 1976 cpu_buffer->head_page = list_entry(next_page, 1977 struct buffer_page, list); 1978 1979 /* pages are removed, resume tracing and then free the pages */ 1980 atomic_dec(&cpu_buffer->record_disabled); 1981 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1982 1983 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1984 1985 /* last buffer page to remove */ 1986 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1987 list); 1988 tmp_iter_page = first_page; 1989 1990 do { 1991 cond_resched(); 1992 1993 to_remove_page = tmp_iter_page; 1994 rb_inc_page(&tmp_iter_page); 1995 1996 /* update the counters */ 1997 page_entries = rb_page_entries(to_remove_page); 1998 if (page_entries) { 1999 /* 2000 * If something was added to this page, it was full 2001 * since it is not the tail page. So we deduct the 2002 * bytes consumed in ring buffer from here. 2003 * Increment overrun to account for the lost events. 2004 */ 2005 local_add(page_entries, &cpu_buffer->overrun); 2006 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2007 local_inc(&cpu_buffer->pages_lost); 2008 } 2009 2010 /* 2011 * We have already removed references to this list item, just 2012 * free up the buffer_page and its page 2013 */ 2014 free_buffer_page(to_remove_page); 2015 nr_removed--; 2016 2017 } while (to_remove_page != last_page); 2018 2019 RB_WARN_ON(cpu_buffer, nr_removed); 2020 2021 return nr_removed == 0; 2022 } 2023 2024 static bool 2025 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 2026 { 2027 struct list_head *pages = &cpu_buffer->new_pages; 2028 unsigned long flags; 2029 bool success; 2030 int retries; 2031 2032 /* Can be called at early boot up, where interrupts must not been enabled */ 2033 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2034 /* 2035 * We are holding the reader lock, so the reader page won't be swapped 2036 * in the ring buffer. Now we are racing with the writer trying to 2037 * move head page and the tail page. 2038 * We are going to adapt the reader page update process where: 2039 * 1. We first splice the start and end of list of new pages between 2040 * the head page and its previous page. 2041 * 2. We cmpxchg the prev_page->next to point from head page to the 2042 * start of new pages list. 2043 * 3. Finally, we update the head->prev to the end of new list. 2044 * 2045 * We will try this process 10 times, to make sure that we don't keep 2046 * spinning. 
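 *
 * A rough sketch of one successful iteration, where H is the head
 * page, P is the page before it, and F..L are the new pages (the
 * HEAD flag stays on whichever pointer leads to H):
 *
 *	before:  P --(HEAD)--> H
 *	step 1:  L->next = H (HEAD flag set), F->prev = P
 *	step 2:  cmpxchg(&P->next, H|HEAD, F) succeeds
 *	step 3:  H->prev = L
 *	after:   P --> F .. L --(HEAD)--> H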
2047 */ 2048 retries = 10; 2049 success = false; 2050 while (retries--) { 2051 struct list_head *head_page, *prev_page, *r; 2052 struct list_head *last_page, *first_page; 2053 struct list_head *head_page_with_bit; 2054 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); 2055 2056 if (!hpage) 2057 break; 2058 head_page = &hpage->list; 2059 prev_page = head_page->prev; 2060 2061 first_page = pages->next; 2062 last_page = pages->prev; 2063 2064 head_page_with_bit = (struct list_head *) 2065 ((unsigned long)head_page | RB_PAGE_HEAD); 2066 2067 last_page->next = head_page_with_bit; 2068 first_page->prev = prev_page; 2069 2070 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 2071 2072 if (r == head_page_with_bit) { 2073 /* 2074 * yay, we replaced the page pointer to our new list, 2075 * now, we just have to update to head page's prev 2076 * pointer to point to end of list 2077 */ 2078 head_page->prev = last_page; 2079 success = true; 2080 break; 2081 } 2082 } 2083 2084 if (success) 2085 INIT_LIST_HEAD(pages); 2086 /* 2087 * If we weren't successful in adding in new pages, warn and stop 2088 * tracing 2089 */ 2090 RB_WARN_ON(cpu_buffer, !success); 2091 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2092 2093 /* free pages if they weren't inserted */ 2094 if (!success) { 2095 struct buffer_page *bpage, *tmp; 2096 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2097 list) { 2098 list_del_init(&bpage->list); 2099 free_buffer_page(bpage); 2100 } 2101 } 2102 return success; 2103 } 2104 2105 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2106 { 2107 bool success; 2108 2109 if (cpu_buffer->nr_pages_to_update > 0) 2110 success = rb_insert_pages(cpu_buffer); 2111 else 2112 success = rb_remove_pages(cpu_buffer, 2113 -cpu_buffer->nr_pages_to_update); 2114 2115 if (success) 2116 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2117 } 2118 2119 static void update_pages_handler(struct work_struct *work) 2120 { 2121 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2122 struct ring_buffer_per_cpu, update_pages_work); 2123 rb_update_pages(cpu_buffer); 2124 complete(&cpu_buffer->update_done); 2125 } 2126 2127 /** 2128 * ring_buffer_resize - resize the ring buffer 2129 * @buffer: the buffer to resize. 2130 * @size: the new size. 2131 * @cpu_id: the cpu buffer to resize 2132 * 2133 * Minimum size is 2 * BUF_PAGE_SIZE. 2134 * 2135 * Returns 0 on success and < 0 on failure. 2136 */ 2137 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2138 int cpu_id) 2139 { 2140 struct ring_buffer_per_cpu *cpu_buffer; 2141 unsigned long nr_pages; 2142 int cpu, err; 2143 2144 /* 2145 * Always succeed at resizing a non-existent buffer: 2146 */ 2147 if (!buffer) 2148 return 0; 2149 2150 /* Make sure the requested buffer exists */ 2151 if (cpu_id != RING_BUFFER_ALL_CPUS && 2152 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2153 return 0; 2154 2155 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 2156 2157 /* we need a minimum of two pages */ 2158 if (nr_pages < 2) 2159 nr_pages = 2; 2160 2161 /* prevent another thread from changing buffer sizes */ 2162 mutex_lock(&buffer->mutex); 2163 atomic_inc(&buffer->resizing); 2164 2165 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2166 /* 2167 * Don't succeed if resizing is disabled, as a reader might be 2168 * manipulating the ring buffer and is expecting a sane state while 2169 * this is true. 
2170 */ 2171 for_each_buffer_cpu(buffer, cpu) { 2172 cpu_buffer = buffer->buffers[cpu]; 2173 if (atomic_read(&cpu_buffer->resize_disabled)) { 2174 err = -EBUSY; 2175 goto out_err_unlock; 2176 } 2177 } 2178 2179 /* calculate the pages to update */ 2180 for_each_buffer_cpu(buffer, cpu) { 2181 cpu_buffer = buffer->buffers[cpu]; 2182 2183 cpu_buffer->nr_pages_to_update = nr_pages - 2184 cpu_buffer->nr_pages; 2185 /* 2186 * nothing more to do for removing pages or no update 2187 */ 2188 if (cpu_buffer->nr_pages_to_update <= 0) 2189 continue; 2190 /* 2191 * to add pages, make sure all new pages can be 2192 * allocated without receiving ENOMEM 2193 */ 2194 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2195 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2196 &cpu_buffer->new_pages)) { 2197 /* not enough memory for new pages */ 2198 err = -ENOMEM; 2199 goto out_err; 2200 } 2201 } 2202 2203 cpus_read_lock(); 2204 /* 2205 * Fire off all the required work handlers 2206 * We can't schedule on offline CPUs, but it's not necessary 2207 * since we can change their buffer sizes without any race. 2208 */ 2209 for_each_buffer_cpu(buffer, cpu) { 2210 cpu_buffer = buffer->buffers[cpu]; 2211 if (!cpu_buffer->nr_pages_to_update) 2212 continue; 2213 2214 /* Can't run something on an offline CPU. */ 2215 if (!cpu_online(cpu)) { 2216 rb_update_pages(cpu_buffer); 2217 cpu_buffer->nr_pages_to_update = 0; 2218 } else { 2219 /* Run directly if possible. */ 2220 migrate_disable(); 2221 if (cpu != smp_processor_id()) { 2222 migrate_enable(); 2223 schedule_work_on(cpu, 2224 &cpu_buffer->update_pages_work); 2225 } else { 2226 update_pages_handler(&cpu_buffer->update_pages_work); 2227 migrate_enable(); 2228 } 2229 } 2230 } 2231 2232 /* wait for all the updates to complete */ 2233 for_each_buffer_cpu(buffer, cpu) { 2234 cpu_buffer = buffer->buffers[cpu]; 2235 if (!cpu_buffer->nr_pages_to_update) 2236 continue; 2237 2238 if (cpu_online(cpu)) 2239 wait_for_completion(&cpu_buffer->update_done); 2240 cpu_buffer->nr_pages_to_update = 0; 2241 } 2242 2243 cpus_read_unlock(); 2244 } else { 2245 cpu_buffer = buffer->buffers[cpu_id]; 2246 2247 if (nr_pages == cpu_buffer->nr_pages) 2248 goto out; 2249 2250 /* 2251 * Don't succeed if resizing is disabled, as a reader might be 2252 * manipulating the ring buffer and is expecting a sane state while 2253 * this is true. 2254 */ 2255 if (atomic_read(&cpu_buffer->resize_disabled)) { 2256 err = -EBUSY; 2257 goto out_err_unlock; 2258 } 2259 2260 cpu_buffer->nr_pages_to_update = nr_pages - 2261 cpu_buffer->nr_pages; 2262 2263 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2264 if (cpu_buffer->nr_pages_to_update > 0 && 2265 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2266 &cpu_buffer->new_pages)) { 2267 err = -ENOMEM; 2268 goto out_err; 2269 } 2270 2271 cpus_read_lock(); 2272 2273 /* Can't run something on an offline CPU. */ 2274 if (!cpu_online(cpu_id)) 2275 rb_update_pages(cpu_buffer); 2276 else { 2277 /* Run directly if possible. */ 2278 migrate_disable(); 2279 if (cpu_id == smp_processor_id()) { 2280 rb_update_pages(cpu_buffer); 2281 migrate_enable(); 2282 } else { 2283 migrate_enable(); 2284 schedule_work_on(cpu_id, 2285 &cpu_buffer->update_pages_work); 2286 wait_for_completion(&cpu_buffer->update_done); 2287 } 2288 } 2289 2290 cpu_buffer->nr_pages_to_update = 0; 2291 cpus_read_unlock(); 2292 } 2293 2294 out: 2295 /* 2296 * The ring buffer resize can happen with the ring buffer 2297 * enabled, so that the update disturbs the tracing as little 2298 * as possible. 
But if the buffer is disabled, we do not need 2299 * to worry about that, and we can take the time to verify 2300 * that the buffer is not corrupt. 2301 */ 2302 if (atomic_read(&buffer->record_disabled)) { 2303 atomic_inc(&buffer->record_disabled); 2304 /* 2305 * Even though the buffer was disabled, we must make sure 2306 * that it is truly disabled before calling rb_check_pages. 2307 * There could have been a race between checking 2308 * record_disable and incrementing it. 2309 */ 2310 synchronize_rcu(); 2311 for_each_buffer_cpu(buffer, cpu) { 2312 cpu_buffer = buffer->buffers[cpu]; 2313 rb_check_pages(cpu_buffer); 2314 } 2315 atomic_dec(&buffer->record_disabled); 2316 } 2317 2318 atomic_dec(&buffer->resizing); 2319 mutex_unlock(&buffer->mutex); 2320 return 0; 2321 2322 out_err: 2323 for_each_buffer_cpu(buffer, cpu) { 2324 struct buffer_page *bpage, *tmp; 2325 2326 cpu_buffer = buffer->buffers[cpu]; 2327 cpu_buffer->nr_pages_to_update = 0; 2328 2329 if (list_empty(&cpu_buffer->new_pages)) 2330 continue; 2331 2332 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2333 list) { 2334 list_del_init(&bpage->list); 2335 free_buffer_page(bpage); 2336 } 2337 } 2338 out_err_unlock: 2339 atomic_dec(&buffer->resizing); 2340 mutex_unlock(&buffer->mutex); 2341 return err; 2342 } 2343 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2344 2345 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2346 { 2347 mutex_lock(&buffer->mutex); 2348 if (val) 2349 buffer->flags |= RB_FL_OVERWRITE; 2350 else 2351 buffer->flags &= ~RB_FL_OVERWRITE; 2352 mutex_unlock(&buffer->mutex); 2353 } 2354 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2355 2356 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2357 { 2358 return bpage->page->data + index; 2359 } 2360 2361 static __always_inline struct ring_buffer_event * 2362 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2363 { 2364 return __rb_page_index(cpu_buffer->reader_page, 2365 cpu_buffer->reader_page->read); 2366 } 2367 2368 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2369 { 2370 return local_read(&bpage->page->commit); 2371 } 2372 2373 static struct ring_buffer_event * 2374 rb_iter_head_event(struct ring_buffer_iter *iter) 2375 { 2376 struct ring_buffer_event *event; 2377 struct buffer_page *iter_head_page = iter->head_page; 2378 unsigned long commit; 2379 unsigned length; 2380 2381 if (iter->head != iter->next_event) 2382 return iter->event; 2383 2384 /* 2385 * When the writer goes across pages, it issues a cmpxchg which 2386 * is a mb(), which will synchronize with the rmb here. 2387 * (see rb_tail_page_update() and __rb_reserve_next()) 2388 */ 2389 commit = rb_page_commit(iter_head_page); 2390 smp_rmb(); 2391 event = __rb_page_index(iter_head_page, iter->head); 2392 length = rb_event_length(event); 2393 2394 /* 2395 * READ_ONCE() doesn't work on functions and we don't want the 2396 * compiler doing any crazy optimizations with length. 2397 */ 2398 barrier(); 2399 2400 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2401 /* Writer corrupted the read? */ 2402 goto reset; 2403 2404 memcpy(iter->event, event, length); 2405 /* 2406 * If the page stamp is still the same after this rmb() then the 2407 * event was safely copied without the writer entering the page. 
2408 */ 2409 smp_rmb(); 2410 2411 /* Make sure the page didn't change since we read this */ 2412 if (iter->page_stamp != iter_head_page->page->time_stamp || 2413 commit > rb_page_commit(iter_head_page)) 2414 goto reset; 2415 2416 iter->next_event = iter->head + length; 2417 return iter->event; 2418 reset: 2419 /* Reset to the beginning */ 2420 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2421 iter->head = 0; 2422 iter->next_event = 0; 2423 iter->missed_events = 1; 2424 return NULL; 2425 } 2426 2427 /* Size is determined by what has been committed */ 2428 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2429 { 2430 return rb_page_commit(bpage); 2431 } 2432 2433 static __always_inline unsigned 2434 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2435 { 2436 return rb_page_commit(cpu_buffer->commit_page); 2437 } 2438 2439 static __always_inline unsigned 2440 rb_event_index(struct ring_buffer_event *event) 2441 { 2442 unsigned long addr = (unsigned long)event; 2443 2444 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2445 } 2446 2447 static void rb_inc_iter(struct ring_buffer_iter *iter) 2448 { 2449 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2450 2451 /* 2452 * The iterator could be on the reader page (it starts there). 2453 * But the head could have moved, since the reader was 2454 * found. Check for this case and assign the iterator 2455 * to the head page instead of next. 2456 */ 2457 if (iter->head_page == cpu_buffer->reader_page) 2458 iter->head_page = rb_set_head_page(cpu_buffer); 2459 else 2460 rb_inc_page(&iter->head_page); 2461 2462 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2463 iter->head = 0; 2464 iter->next_event = 0; 2465 } 2466 2467 /* 2468 * rb_handle_head_page - writer hit the head page 2469 * 2470 * Returns: +1 to retry page 2471 * 0 to continue 2472 * -1 on error 2473 */ 2474 static int 2475 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2476 struct buffer_page *tail_page, 2477 struct buffer_page *next_page) 2478 { 2479 struct buffer_page *new_head; 2480 int entries; 2481 int type; 2482 int ret; 2483 2484 entries = rb_page_entries(next_page); 2485 2486 /* 2487 * The hard part is here. We need to move the head 2488 * forward, and protect against both readers on 2489 * other CPUs and writers coming in via interrupts. 2490 */ 2491 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2492 RB_PAGE_HEAD); 2493 2494 /* 2495 * type can be one of four: 2496 * NORMAL - an interrupt already moved it for us 2497 * HEAD - we are the first to get here. 2498 * UPDATE - we are the interrupt interrupting 2499 * a current move. 2500 * MOVED - a reader on another CPU moved the next 2501 * pointer to its reader page. Give up 2502 * and try again. 2503 */ 2504 2505 switch (type) { 2506 case RB_PAGE_HEAD: 2507 /* 2508 * We changed the head to UPDATE, thus 2509 * it is our responsibility to update 2510 * the counters. 2511 */ 2512 local_add(entries, &cpu_buffer->overrun); 2513 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2514 local_inc(&cpu_buffer->pages_lost); 2515 2516 /* 2517 * The entries will be zeroed out when we move the 2518 * tail page. 2519 */ 2520 2521 /* still more to do */ 2522 break; 2523 2524 case RB_PAGE_UPDATE: 2525 /* 2526 * This is an interrupt that interrupt the 2527 * previous update. Still more to do. 2528 */ 2529 break; 2530 case RB_PAGE_NORMAL: 2531 /* 2532 * An interrupt came in before the update 2533 * and processed this for us. 
2534 * Nothing left to do. 2535 */ 2536 return 1; 2537 case RB_PAGE_MOVED: 2538 /* 2539 * The reader is on another CPU and just did 2540 * a swap with our next_page. 2541 * Try again. 2542 */ 2543 return 1; 2544 default: 2545 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2546 return -1; 2547 } 2548 2549 /* 2550 * Now that we are here, the old head pointer is 2551 * set to UPDATE. This will keep the reader from 2552 * swapping the head page with the reader page. 2553 * The reader (on another CPU) will spin till 2554 * we are finished. 2555 * 2556 * We just need to protect against interrupts 2557 * doing the job. We will set the next pointer 2558 * to HEAD. After that, we set the old pointer 2559 * to NORMAL, but only if it was HEAD before. 2560 * otherwise we are an interrupt, and only 2561 * want the outer most commit to reset it. 2562 */ 2563 new_head = next_page; 2564 rb_inc_page(&new_head); 2565 2566 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2567 RB_PAGE_NORMAL); 2568 2569 /* 2570 * Valid returns are: 2571 * HEAD - an interrupt came in and already set it. 2572 * NORMAL - One of two things: 2573 * 1) We really set it. 2574 * 2) A bunch of interrupts came in and moved 2575 * the page forward again. 2576 */ 2577 switch (ret) { 2578 case RB_PAGE_HEAD: 2579 case RB_PAGE_NORMAL: 2580 /* OK */ 2581 break; 2582 default: 2583 RB_WARN_ON(cpu_buffer, 1); 2584 return -1; 2585 } 2586 2587 /* 2588 * It is possible that an interrupt came in, 2589 * set the head up, then more interrupts came in 2590 * and moved it again. When we get back here, 2591 * the page would have been set to NORMAL but we 2592 * just set it back to HEAD. 2593 * 2594 * How do you detect this? Well, if that happened 2595 * the tail page would have moved. 2596 */ 2597 if (ret == RB_PAGE_NORMAL) { 2598 struct buffer_page *buffer_tail_page; 2599 2600 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 2601 /* 2602 * If the tail had moved passed next, then we need 2603 * to reset the pointer. 2604 */ 2605 if (buffer_tail_page != tail_page && 2606 buffer_tail_page != next_page) 2607 rb_head_page_set_normal(cpu_buffer, new_head, 2608 next_page, 2609 RB_PAGE_HEAD); 2610 } 2611 2612 /* 2613 * If this was the outer most commit (the one that 2614 * changed the original pointer from HEAD to UPDATE), 2615 * then it is up to us to reset it to NORMAL. 2616 */ 2617 if (type == RB_PAGE_HEAD) { 2618 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2619 tail_page, 2620 RB_PAGE_UPDATE); 2621 if (RB_WARN_ON(cpu_buffer, 2622 ret != RB_PAGE_UPDATE)) 2623 return -1; 2624 } 2625 2626 return 0; 2627 } 2628 2629 static inline void 2630 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2631 unsigned long tail, struct rb_event_info *info) 2632 { 2633 struct buffer_page *tail_page = info->tail_page; 2634 struct ring_buffer_event *event; 2635 unsigned long length = info->length; 2636 2637 /* 2638 * Only the event that crossed the page boundary 2639 * must fill the old tail_page with padding. 2640 */ 2641 if (tail >= BUF_PAGE_SIZE) { 2642 /* 2643 * If the page was filled, then we still need 2644 * to update the real_end. Reset it to zero 2645 * and the reader will ignore it. 2646 */ 2647 if (tail == BUF_PAGE_SIZE) 2648 tail_page->real_end = 0; 2649 2650 local_sub(length, &tail_page->write); 2651 return; 2652 } 2653 2654 event = __rb_page_index(tail_page, tail); 2655 2656 /* account for padding bytes */ 2657 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2658 2659 /* 2660 * Save the original length to the meta data. 
2661 * This will be used by the reader to add lost event 2662 * counter. 2663 */ 2664 tail_page->real_end = tail; 2665 2666 /* 2667 * If this event is bigger than the minimum size, then 2668 * we need to be careful that we don't subtract the 2669 * write counter enough to allow another writer to slip 2670 * in on this page. 2671 * We put in a discarded commit instead, to make sure 2672 * that this space is not used again. 2673 * 2674 * If we are less than the minimum size, we don't need to 2675 * worry about it. 2676 */ 2677 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2678 /* No room for any events */ 2679 2680 /* Mark the rest of the page with padding */ 2681 rb_event_set_padding(event); 2682 2683 /* Make sure the padding is visible before the write update */ 2684 smp_wmb(); 2685 2686 /* Set the write back to the previous setting */ 2687 local_sub(length, &tail_page->write); 2688 return; 2689 } 2690 2691 /* Put in a discarded event */ 2692 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2693 event->type_len = RINGBUF_TYPE_PADDING; 2694 /* time delta must be non zero */ 2695 event->time_delta = 1; 2696 2697 /* Make sure the padding is visible before the tail_page->write update */ 2698 smp_wmb(); 2699 2700 /* Set write to end of buffer */ 2701 length = (tail + length) - BUF_PAGE_SIZE; 2702 local_sub(length, &tail_page->write); 2703 } 2704 2705 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2706 2707 /* 2708 * This is the slow path, force gcc not to inline it. 2709 */ 2710 static noinline struct ring_buffer_event * 2711 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2712 unsigned long tail, struct rb_event_info *info) 2713 { 2714 struct buffer_page *tail_page = info->tail_page; 2715 struct buffer_page *commit_page = cpu_buffer->commit_page; 2716 struct trace_buffer *buffer = cpu_buffer->buffer; 2717 struct buffer_page *next_page; 2718 int ret; 2719 2720 next_page = tail_page; 2721 2722 rb_inc_page(&next_page); 2723 2724 /* 2725 * If for some reason, we had an interrupt storm that made 2726 * it all the way around the buffer, bail, and warn 2727 * about it. 2728 */ 2729 if (unlikely(next_page == commit_page)) { 2730 local_inc(&cpu_buffer->commit_overrun); 2731 goto out_reset; 2732 } 2733 2734 /* 2735 * This is where the fun begins! 2736 * 2737 * We are fighting against races between a reader that 2738 * could be on another CPU trying to swap its reader 2739 * page with the buffer head. 2740 * 2741 * We are also fighting against interrupts coming in and 2742 * moving the head or tail on us as well. 2743 * 2744 * If the next page is the head page then we have filled 2745 * the buffer, unless the commit page is still on the 2746 * reader page. 2747 */ 2748 if (rb_is_head_page(next_page, &tail_page->list)) { 2749 2750 /* 2751 * If the commit is not on the reader page, then 2752 * move the header page. 2753 */ 2754 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2755 /* 2756 * If we are not in overwrite mode, 2757 * this is easy, just stop here. 2758 */ 2759 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2760 local_inc(&cpu_buffer->dropped_events); 2761 goto out_reset; 2762 } 2763 2764 ret = rb_handle_head_page(cpu_buffer, 2765 tail_page, 2766 next_page); 2767 if (ret < 0) 2768 goto out_reset; 2769 if (ret) 2770 goto out_again; 2771 } else { 2772 /* 2773 * We need to be careful here too. The 2774 * commit page could still be on the reader 2775 * page. 
We could have a small buffer, and 2776 * have filled up the buffer with events 2777 * from interrupts and such, and wrapped. 2778 * 2779 * Note, if the tail page is also on the 2780 * reader_page, we let it move out. 2781 */ 2782 if (unlikely((cpu_buffer->commit_page != 2783 cpu_buffer->tail_page) && 2784 (cpu_buffer->commit_page == 2785 cpu_buffer->reader_page))) { 2786 local_inc(&cpu_buffer->commit_overrun); 2787 goto out_reset; 2788 } 2789 } 2790 } 2791 2792 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2793 2794 out_again: 2795 2796 rb_reset_tail(cpu_buffer, tail, info); 2797 2798 /* Commit what we have for now. */ 2799 rb_end_commit(cpu_buffer); 2800 /* rb_end_commit() decs committing */ 2801 local_inc(&cpu_buffer->committing); 2802 2803 /* fail and let the caller try again */ 2804 return ERR_PTR(-EAGAIN); 2805 2806 out_reset: 2807 /* reset write */ 2808 rb_reset_tail(cpu_buffer, tail, info); 2809 2810 return NULL; 2811 } 2812 2813 /* Slow path */ 2814 static struct ring_buffer_event * 2815 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2816 { 2817 if (abs) 2818 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2819 else 2820 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2821 2822 /* Not the first event on the page, or not delta? */ 2823 if (abs || rb_event_index(event)) { 2824 event->time_delta = delta & TS_MASK; 2825 event->array[0] = delta >> TS_SHIFT; 2826 } else { 2827 /* nope, just zero it */ 2828 event->time_delta = 0; 2829 event->array[0] = 0; 2830 } 2831 2832 return skip_time_extend(event); 2833 } 2834 2835 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2836 static inline bool sched_clock_stable(void) 2837 { 2838 return true; 2839 } 2840 #endif 2841 2842 static void 2843 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2844 struct rb_event_info *info) 2845 { 2846 u64 write_stamp; 2847 2848 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2849 (unsigned long long)info->delta, 2850 (unsigned long long)info->ts, 2851 (unsigned long long)info->before, 2852 (unsigned long long)info->after, 2853 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2854 sched_clock_stable() ? "" : 2855 "If you just came from a suspend/resume,\n" 2856 "please switch to the trace global clock:\n" 2857 " echo global > /sys/kernel/tracing/trace_clock\n" 2858 "or add trace_clock=global to the kernel command line\n"); 2859 } 2860 2861 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2862 struct ring_buffer_event **event, 2863 struct rb_event_info *info, 2864 u64 *delta, 2865 unsigned int *length) 2866 { 2867 bool abs = info->add_timestamp & 2868 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2869 2870 if (unlikely(info->delta > (1ULL << 59))) { 2871 /* 2872 * Some timers can use more than 59 bits, and when a timestamp 2873 * is added to the buffer, it will lose those bits. 2874 */ 2875 if (abs && (info->ts & TS_MSB)) { 2876 info->delta &= ABS_TS_MASK; 2877 2878 /* did the clock go backwards */ 2879 } else if (info->before == info->after && info->before > info->ts) { 2880 /* not interrupted */ 2881 static int once; 2882 2883 /* 2884 * This is possible with a recalibrating of the TSC. 2885 * Do not produce a call stack, but just report it. 
2886 */ 2887 if (!once) { 2888 once++; 2889 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2890 info->before, info->ts); 2891 } 2892 } else 2893 rb_check_timestamp(cpu_buffer, info); 2894 if (!abs) 2895 info->delta = 0; 2896 } 2897 *event = rb_add_time_stamp(*event, info->delta, abs); 2898 *length -= RB_LEN_TIME_EXTEND; 2899 *delta = 0; 2900 } 2901 2902 /** 2903 * rb_update_event - update event type and data 2904 * @cpu_buffer: The per cpu buffer of the @event 2905 * @event: the event to update 2906 * @info: The info to update the @event with (contains length and delta) 2907 * 2908 * Update the type and data fields of the @event. The length 2909 * is the actual size that is written to the ring buffer, 2910 * and with this, we can determine what to place into the 2911 * data field. 2912 */ 2913 static void 2914 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2915 struct ring_buffer_event *event, 2916 struct rb_event_info *info) 2917 { 2918 unsigned length = info->length; 2919 u64 delta = info->delta; 2920 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2921 2922 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2923 cpu_buffer->event_stamp[nest] = info->ts; 2924 2925 /* 2926 * If we need to add a timestamp, then we 2927 * add it to the start of the reserved space. 2928 */ 2929 if (unlikely(info->add_timestamp)) 2930 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2931 2932 event->time_delta = delta; 2933 length -= RB_EVNT_HDR_SIZE; 2934 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2935 event->type_len = 0; 2936 event->array[0] = length; 2937 } else 2938 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2939 } 2940 2941 static unsigned rb_calculate_event_length(unsigned length) 2942 { 2943 struct ring_buffer_event event; /* Used only for sizeof array */ 2944 2945 /* zero length can cause confusions */ 2946 if (!length) 2947 length++; 2948 2949 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2950 length += sizeof(event.array[0]); 2951 2952 length += RB_EVNT_HDR_SIZE; 2953 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2954 2955 /* 2956 * In case the time delta is larger than the 27 bits for it 2957 * in the header, we need to add a timestamp. If another 2958 * event comes in when trying to discard this one to increase 2959 * the length, then the timestamp will be added in the allocated 2960 * space of this event. If length is bigger than the size needed 2961 * for the TIME_EXTEND, then padding has to be used. The events 2962 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2963 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2964 * As length is a multiple of 4, we only need to worry if it 2965 * is 12 (RB_LEN_TIME_EXTEND + 4). 
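 *
 * A worked example, assuming RB_FORCE_8BYTE_ALIGNMENT is not set: a
 * 6 byte payload becomes 6 + 4 (header) = 10, aligned up to 12. If
 * that event were later discarded and re-used for an 8 byte time
 * extend, only 4 bytes would remain, which cannot hold the 8 byte
 * minimum padding event, so 12 is bumped to 16 below.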
2966 */ 2967 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 2968 length += RB_ALIGNMENT; 2969 2970 return length; 2971 } 2972 2973 static u64 rb_time_delta(struct ring_buffer_event *event) 2974 { 2975 switch (event->type_len) { 2976 case RINGBUF_TYPE_PADDING: 2977 return 0; 2978 2979 case RINGBUF_TYPE_TIME_EXTEND: 2980 return rb_event_time_stamp(event); 2981 2982 case RINGBUF_TYPE_TIME_STAMP: 2983 return 0; 2984 2985 case RINGBUF_TYPE_DATA: 2986 return event->time_delta; 2987 default: 2988 return 0; 2989 } 2990 } 2991 2992 static inline bool 2993 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2994 struct ring_buffer_event *event) 2995 { 2996 unsigned long new_index, old_index; 2997 struct buffer_page *bpage; 2998 unsigned long addr; 2999 u64 write_stamp; 3000 u64 delta; 3001 3002 new_index = rb_event_index(event); 3003 old_index = new_index + rb_event_ts_length(event); 3004 addr = (unsigned long)event; 3005 addr &= PAGE_MASK; 3006 3007 bpage = READ_ONCE(cpu_buffer->tail_page); 3008 3009 delta = rb_time_delta(event); 3010 3011 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) 3012 return false; 3013 3014 /* Make sure the write stamp is read before testing the location */ 3015 barrier(); 3016 3017 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 3018 unsigned long write_mask = 3019 local_read(&bpage->write) & ~RB_WRITE_MASK; 3020 unsigned long event_length = rb_event_length(event); 3021 3022 /* Something came in, can't discard */ 3023 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, 3024 write_stamp, write_stamp - delta)) 3025 return false; 3026 3027 /* 3028 * It's possible that the event time delta is zero 3029 * (has the same time stamp as the previous event) 3030 * in which case write_stamp and before_stamp could 3031 * be the same. In such a case, force before_stamp 3032 * to be different than write_stamp. It doesn't 3033 * matter what it is, as long as its different. 3034 */ 3035 if (!delta) 3036 rb_time_set(&cpu_buffer->before_stamp, 0); 3037 3038 /* 3039 * If an event were to come in now, it would see that the 3040 * write_stamp and the before_stamp are different, and assume 3041 * that this event just added itself before updating 3042 * the write stamp. The interrupting event will fix the 3043 * write stamp for us, and use the before stamp as its delta. 3044 */ 3045 3046 /* 3047 * This is on the tail page. It is possible that 3048 * a write could come in and move the tail page 3049 * and write to the next page. That is fine 3050 * because we just shorten what is on this page. 3051 */ 3052 old_index += write_mask; 3053 new_index += write_mask; 3054 3055 /* caution: old_index gets updated on cmpxchg failure */ 3056 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { 3057 /* update counters */ 3058 local_sub(event_length, &cpu_buffer->entries_bytes); 3059 return true; 3060 } 3061 } 3062 3063 /* could not discard */ 3064 return false; 3065 } 3066 3067 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3068 { 3069 local_inc(&cpu_buffer->committing); 3070 local_inc(&cpu_buffer->commits); 3071 } 3072 3073 static __always_inline void 3074 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3075 { 3076 unsigned long max_count; 3077 3078 /* 3079 * We only race with interrupts and NMIs on this CPU. 3080 * If we own the commit event, then we can commit 3081 * all others that interrupted us, since the interruptions 3082 * are in stack format (they finish before they come 3083 * back to us). 
This allows us to do a simple loop to 3084 * assign the commit to the tail. 3085 */ 3086 again: 3087 max_count = cpu_buffer->nr_pages * 100; 3088 3089 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3090 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3091 return; 3092 if (RB_WARN_ON(cpu_buffer, 3093 rb_is_reader_page(cpu_buffer->tail_page))) 3094 return; 3095 /* 3096 * No need for a memory barrier here, as the update 3097 * of the tail_page did it for this page. 3098 */ 3099 local_set(&cpu_buffer->commit_page->page->commit, 3100 rb_page_write(cpu_buffer->commit_page)); 3101 rb_inc_page(&cpu_buffer->commit_page); 3102 /* add barrier to keep gcc from optimizing too much */ 3103 barrier(); 3104 } 3105 while (rb_commit_index(cpu_buffer) != 3106 rb_page_write(cpu_buffer->commit_page)) { 3107 3108 /* Make sure the readers see the content of what is committed. */ 3109 smp_wmb(); 3110 local_set(&cpu_buffer->commit_page->page->commit, 3111 rb_page_write(cpu_buffer->commit_page)); 3112 RB_WARN_ON(cpu_buffer, 3113 local_read(&cpu_buffer->commit_page->page->commit) & 3114 ~RB_WRITE_MASK); 3115 barrier(); 3116 } 3117 3118 /* again, keep gcc from optimizing */ 3119 barrier(); 3120 3121 /* 3122 * If an interrupt came in just after the first while loop 3123 * and pushed the tail page forward, we will be left with 3124 * a dangling commit that will never go forward. 3125 */ 3126 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3127 goto again; 3128 } 3129 3130 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3131 { 3132 unsigned long commits; 3133 3134 if (RB_WARN_ON(cpu_buffer, 3135 !local_read(&cpu_buffer->committing))) 3136 return; 3137 3138 again: 3139 commits = local_read(&cpu_buffer->commits); 3140 /* synchronize with interrupts */ 3141 barrier(); 3142 if (local_read(&cpu_buffer->committing) == 1) 3143 rb_set_commit_to_write(cpu_buffer); 3144 3145 local_dec(&cpu_buffer->committing); 3146 3147 /* synchronize with interrupts */ 3148 barrier(); 3149 3150 /* 3151 * Need to account for interrupts coming in between the 3152 * updating of the commit page and the clearing of the 3153 * committing counter. 
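 * For example, if an interrupt reserves and commits an event after we
 * read 'commits' above but before 'committing' drops to zero, its own
 * rb_end_commit() will not push the commit page forward (it is not
 * the outermost commit), so we have to notice the changed 'commits'
 * count and loop back to do it here.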
3154 */ 3155 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3156 !local_read(&cpu_buffer->committing)) { 3157 local_inc(&cpu_buffer->committing); 3158 goto again; 3159 } 3160 } 3161 3162 static inline void rb_event_discard(struct ring_buffer_event *event) 3163 { 3164 if (extended_time(event)) 3165 event = skip_time_extend(event); 3166 3167 /* array[0] holds the actual length for the discarded event */ 3168 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3169 event->type_len = RINGBUF_TYPE_PADDING; 3170 /* time delta must be non zero */ 3171 if (!event->time_delta) 3172 event->time_delta = 1; 3173 } 3174 3175 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3176 { 3177 local_inc(&cpu_buffer->entries); 3178 rb_end_commit(cpu_buffer); 3179 } 3180 3181 static __always_inline void 3182 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3183 { 3184 if (buffer->irq_work.waiters_pending) { 3185 buffer->irq_work.waiters_pending = false; 3186 /* irq_work_queue() supplies it's own memory barriers */ 3187 irq_work_queue(&buffer->irq_work.work); 3188 } 3189 3190 if (cpu_buffer->irq_work.waiters_pending) { 3191 cpu_buffer->irq_work.waiters_pending = false; 3192 /* irq_work_queue() supplies it's own memory barriers */ 3193 irq_work_queue(&cpu_buffer->irq_work.work); 3194 } 3195 3196 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3197 return; 3198 3199 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3200 return; 3201 3202 if (!cpu_buffer->irq_work.full_waiters_pending) 3203 return; 3204 3205 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3206 3207 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 3208 return; 3209 3210 cpu_buffer->irq_work.wakeup_full = true; 3211 cpu_buffer->irq_work.full_waiters_pending = false; 3212 /* irq_work_queue() supplies it's own memory barriers */ 3213 irq_work_queue(&cpu_buffer->irq_work.work); 3214 } 3215 3216 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3217 # define do_ring_buffer_record_recursion() \ 3218 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3219 #else 3220 # define do_ring_buffer_record_recursion() do { } while (0) 3221 #endif 3222 3223 /* 3224 * The lock and unlock are done within a preempt disable section. 3225 * The current_context per_cpu variable can only be modified 3226 * by the current task between lock and unlock. But it can 3227 * be modified more than once via an interrupt. To pass this 3228 * information from the lock to the unlock without having to 3229 * access the 'in_interrupt()' functions again (which do show 3230 * a bit of overhead in something as critical as function tracing, 3231 * we use a bitmask trick. 3232 * 3233 * bit 1 = NMI context 3234 * bit 2 = IRQ context 3235 * bit 3 = SoftIRQ context 3236 * bit 4 = normal context. 3237 * 3238 * This works because this is the order of contexts that can 3239 * preempt other contexts. A SoftIRQ never preempts an IRQ 3240 * context. 3241 * 3242 * When the context is determined, the corresponding bit is 3243 * checked and set (if it was set, then a recursion of that context 3244 * happened). 3245 * 3246 * On unlock, we need to clear this bit. To do so, just subtract 3247 * 1 from the current_context and AND it to itself. 
3248 * 3249 * (binary) 3250 * 101 - 1 = 100 3251 * 101 & 100 = 100 (clearing bit zero) 3252 * 3253 * 1010 - 1 = 1001 3254 * 1010 & 1001 = 1000 (clearing bit 1) 3255 * 3256 * The least significant bit can be cleared this way, and it 3257 * just so happens that it is the same bit corresponding to 3258 * the current context. 3259 * 3260 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3261 * is set when a recursion is detected at the current context, and if 3262 * the TRANSITION bit is already set, it will fail the recursion. 3263 * This is needed because there's a lag between the changing of 3264 * interrupt context and updating the preempt count. In this case, 3265 * a false positive will be found. To handle this, one extra recursion 3266 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3267 * bit is already set, then it is considered a recursion and the function 3268 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3269 * 3270 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3271 * to be cleared. Even if it wasn't the context that set it. That is, 3272 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3273 * is called before preempt_count() is updated, since the check will 3274 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3275 * NMI then comes in, it will set the NMI bit, but when the NMI code 3276 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3277 * and leave the NMI bit set. But this is fine, because the interrupt 3278 * code that set the TRANSITION bit will then clear the NMI bit when it 3279 * calls trace_recursive_unlock(). If another NMI comes in, it will 3280 * set the TRANSITION bit and continue. 3281 * 3282 * Note: The TRANSITION bit only handles a single transition between context. 3283 */ 3284 3285 static __always_inline bool 3286 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3287 { 3288 unsigned int val = cpu_buffer->current_context; 3289 int bit = interrupt_context_level(); 3290 3291 bit = RB_CTX_NORMAL - bit; 3292 3293 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3294 /* 3295 * It is possible that this was called by transitioning 3296 * between interrupt context, and preempt_count() has not 3297 * been updated yet. In this case, use the TRANSITION bit. 3298 */ 3299 bit = RB_CTX_TRANSITION; 3300 if (val & (1 << (bit + cpu_buffer->nest))) { 3301 do_ring_buffer_record_recursion(); 3302 return true; 3303 } 3304 } 3305 3306 val |= (1 << (bit + cpu_buffer->nest)); 3307 cpu_buffer->current_context = val; 3308 3309 return false; 3310 } 3311 3312 static __always_inline void 3313 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3314 { 3315 cpu_buffer->current_context &= 3316 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3317 } 3318 3319 /* The recursive locking above uses 5 bits */ 3320 #define NESTED_BITS 5 3321 3322 /** 3323 * ring_buffer_nest_start - Allow to trace while nested 3324 * @buffer: The ring buffer to modify 3325 * 3326 * The ring buffer has a safety mechanism to prevent recursion. 3327 * But there may be a case where a trace needs to be done while 3328 * tracing something else. In this case, calling this function 3329 * will allow this function to nest within a currently active 3330 * ring_buffer_lock_reserve(). 
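 *
 * A minimal usage sketch, for illustration only (the 'entry' payload
 * here is hypothetical, not an in-tree user), run while an outer
 * ring_buffer_lock_reserve() is still outstanding on this CPU:
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = value;
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);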
3331 * 3332 * Call this function before calling another ring_buffer_lock_reserve() and 3333 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 3334 */ 3335 void ring_buffer_nest_start(struct trace_buffer *buffer) 3336 { 3337 struct ring_buffer_per_cpu *cpu_buffer; 3338 int cpu; 3339 3340 /* Enabled by ring_buffer_nest_end() */ 3341 preempt_disable_notrace(); 3342 cpu = raw_smp_processor_id(); 3343 cpu_buffer = buffer->buffers[cpu]; 3344 /* This is the shift value for the above recursive locking */ 3345 cpu_buffer->nest += NESTED_BITS; 3346 } 3347 3348 /** 3349 * ring_buffer_nest_end - Allow to trace while nested 3350 * @buffer: The ring buffer to modify 3351 * 3352 * Must be called after ring_buffer_nest_start() and after the 3353 * ring_buffer_unlock_commit(). 3354 */ 3355 void ring_buffer_nest_end(struct trace_buffer *buffer) 3356 { 3357 struct ring_buffer_per_cpu *cpu_buffer; 3358 int cpu; 3359 3360 /* disabled by ring_buffer_nest_start() */ 3361 cpu = raw_smp_processor_id(); 3362 cpu_buffer = buffer->buffers[cpu]; 3363 /* This is the shift value for the above recursive locking */ 3364 cpu_buffer->nest -= NESTED_BITS; 3365 preempt_enable_notrace(); 3366 } 3367 3368 /** 3369 * ring_buffer_unlock_commit - commit a reserved 3370 * @buffer: The buffer to commit to 3371 * 3372 * This commits the data to the ring buffer, and releases any locks held. 3373 * 3374 * Must be paired with ring_buffer_lock_reserve. 3375 */ 3376 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3377 { 3378 struct ring_buffer_per_cpu *cpu_buffer; 3379 int cpu = raw_smp_processor_id(); 3380 3381 cpu_buffer = buffer->buffers[cpu]; 3382 3383 rb_commit(cpu_buffer); 3384 3385 rb_wakeups(buffer, cpu_buffer); 3386 3387 trace_recursive_unlock(cpu_buffer); 3388 3389 preempt_enable_notrace(); 3390 3391 return 0; 3392 } 3393 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3394 3395 /* Special value to validate all deltas on a page. */ 3396 #define CHECK_FULL_PAGE 1L 3397 3398 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3399 static void dump_buffer_page(struct buffer_data_page *bpage, 3400 struct rb_event_info *info, 3401 unsigned long tail) 3402 { 3403 struct ring_buffer_event *event; 3404 u64 ts, delta; 3405 int e; 3406 3407 ts = bpage->time_stamp; 3408 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3409 3410 for (e = 0; e < tail; e += rb_event_length(event)) { 3411 3412 event = (struct ring_buffer_event *)(bpage->data + e); 3413 3414 switch (event->type_len) { 3415 3416 case RINGBUF_TYPE_TIME_EXTEND: 3417 delta = rb_event_time_stamp(event); 3418 ts += delta; 3419 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3420 break; 3421 3422 case RINGBUF_TYPE_TIME_STAMP: 3423 delta = rb_event_time_stamp(event); 3424 ts = rb_fix_abs_ts(delta, ts); 3425 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3426 break; 3427 3428 case RINGBUF_TYPE_PADDING: 3429 ts += event->time_delta; 3430 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3431 break; 3432 3433 case RINGBUF_TYPE_DATA: 3434 ts += event->time_delta; 3435 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3436 break; 3437 3438 default: 3439 break; 3440 } 3441 } 3442 } 3443 3444 static DEFINE_PER_CPU(atomic_t, checking); 3445 static atomic_t ts_dump; 3446 3447 /* 3448 * Check if the current event time stamp matches the deltas on 3449 * the buffer page. 
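 * That is, starting from the page's time_stamp, the deltas of the
 * events already on the page are summed back up and the result is
 * compared against the current event's expected time stamp. This is
 * debug-only code, built when CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
 * is enabled.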
3450 */ 3451 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3452 struct rb_event_info *info, 3453 unsigned long tail) 3454 { 3455 struct ring_buffer_event *event; 3456 struct buffer_data_page *bpage; 3457 u64 ts, delta; 3458 bool full = false; 3459 int e; 3460 3461 bpage = info->tail_page->page; 3462 3463 if (tail == CHECK_FULL_PAGE) { 3464 full = true; 3465 tail = local_read(&bpage->commit); 3466 } else if (info->add_timestamp & 3467 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 3468 /* Ignore events with absolute time stamps */ 3469 return; 3470 } 3471 3472 /* 3473 * Do not check the first event (skip possible extends too). 3474 * Also do not check if previous events have not been committed. 3475 */ 3476 if (tail <= 8 || tail > local_read(&bpage->commit)) 3477 return; 3478 3479 /* 3480 * If this interrupted another event, 3481 */ 3482 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 3483 goto out; 3484 3485 ts = bpage->time_stamp; 3486 3487 for (e = 0; e < tail; e += rb_event_length(event)) { 3488 3489 event = (struct ring_buffer_event *)(bpage->data + e); 3490 3491 switch (event->type_len) { 3492 3493 case RINGBUF_TYPE_TIME_EXTEND: 3494 delta = rb_event_time_stamp(event); 3495 ts += delta; 3496 break; 3497 3498 case RINGBUF_TYPE_TIME_STAMP: 3499 delta = rb_event_time_stamp(event); 3500 ts = rb_fix_abs_ts(delta, ts); 3501 break; 3502 3503 case RINGBUF_TYPE_PADDING: 3504 if (event->time_delta == 1) 3505 break; 3506 fallthrough; 3507 case RINGBUF_TYPE_DATA: 3508 ts += event->time_delta; 3509 break; 3510 3511 default: 3512 RB_WARN_ON(cpu_buffer, 1); 3513 } 3514 } 3515 if ((full && ts > info->ts) || 3516 (!full && ts + info->delta != info->ts)) { 3517 /* If another report is happening, ignore this one */ 3518 if (atomic_inc_return(&ts_dump) != 1) { 3519 atomic_dec(&ts_dump); 3520 goto out; 3521 } 3522 atomic_inc(&cpu_buffer->record_disabled); 3523 /* There's some cases in boot up that this can happen */ 3524 WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 3525 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 3526 cpu_buffer->cpu, 3527 ts + info->delta, info->ts, info->delta, 3528 info->before, info->after, 3529 full ? 
" (full)" : ""); 3530 dump_buffer_page(bpage, info, tail); 3531 atomic_dec(&ts_dump); 3532 /* Do not re-enable checking */ 3533 return; 3534 } 3535 out: 3536 atomic_dec(this_cpu_ptr(&checking)); 3537 } 3538 #else 3539 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3540 struct rb_event_info *info, 3541 unsigned long tail) 3542 { 3543 } 3544 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 3545 3546 static struct ring_buffer_event * 3547 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3548 struct rb_event_info *info) 3549 { 3550 struct ring_buffer_event *event; 3551 struct buffer_page *tail_page; 3552 unsigned long tail, write, w; 3553 bool a_ok; 3554 bool b_ok; 3555 3556 /* Don't let the compiler play games with cpu_buffer->tail_page */ 3557 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3558 3559 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3560 barrier(); 3561 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3562 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3563 barrier(); 3564 info->ts = rb_time_stamp(cpu_buffer->buffer); 3565 3566 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3567 info->delta = info->ts; 3568 } else { 3569 /* 3570 * If interrupting an event time update, we may need an 3571 * absolute timestamp. 3572 * Don't bother if this is the start of a new page (w == 0). 3573 */ 3574 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { 3575 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3576 info->length += RB_LEN_TIME_EXTEND; 3577 } else { 3578 info->delta = info->ts - info->after; 3579 if (unlikely(test_time_stamp(info->delta))) { 3580 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 3581 info->length += RB_LEN_TIME_EXTEND; 3582 } 3583 } 3584 } 3585 3586 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3587 3588 /*C*/ write = local_add_return(info->length, &tail_page->write); 3589 3590 /* set write to only the index of the write */ 3591 write &= RB_WRITE_MASK; 3592 3593 tail = write - info->length; 3594 3595 /* See if we shot pass the end of this buffer page */ 3596 if (unlikely(write > BUF_PAGE_SIZE)) { 3597 /* before and after may now different, fix it up*/ 3598 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3599 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3600 if (a_ok && b_ok && info->before != info->after) 3601 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, 3602 info->before, info->after); 3603 if (a_ok && b_ok) 3604 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3605 return rb_move_tail(cpu_buffer, tail, info); 3606 } 3607 3608 if (likely(tail == w)) { 3609 u64 save_before; 3610 bool s_ok; 3611 3612 /* Nothing interrupted us between A and C */ 3613 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3614 barrier(); 3615 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); 3616 RB_WARN_ON(cpu_buffer, !s_ok); 3617 if (likely(!(info->add_timestamp & 3618 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3619 /* This did not interrupt any time update */ 3620 info->delta = info->ts - info->after; 3621 else 3622 /* Just use full timestamp for interrupting event */ 3623 info->delta = info->ts; 3624 barrier(); 3625 check_buffer(cpu_buffer, info, tail); 3626 if (unlikely(info->ts != save_before)) { 3627 /* SLOW PATH - Interrupted between C and E */ 3628 3629 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3630 RB_WARN_ON(cpu_buffer, !a_ok); 3631 3632 /* Write stamp must only go 
forward */ 3633 if (save_before > info->after) { 3634 /* 3635 * We do not care about the result, only that 3636 * it gets updated atomically. 3637 */ 3638 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, 3639 info->after, save_before); 3640 } 3641 } 3642 } else { 3643 u64 ts; 3644 /* SLOW PATH - Interrupted between A and C */ 3645 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3646 /* Was interrupted before here, write_stamp must be valid */ 3647 RB_WARN_ON(cpu_buffer, !a_ok); 3648 ts = rb_time_stamp(cpu_buffer->buffer); 3649 barrier(); 3650 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3651 info->after < ts && 3652 rb_time_cmpxchg(&cpu_buffer->write_stamp, 3653 info->after, ts)) { 3654 /* Nothing came after this event between C and E */ 3655 info->delta = ts - info->after; 3656 } else { 3657 /* 3658 * Interrupted between C and E: 3659 * Lost the previous events time stamp. Just set the 3660 * delta to zero, and this will be the same time as 3661 * the event this event interrupted. And the events that 3662 * came after this will still be correct (as they would 3663 * have built their delta on the previous event. 3664 */ 3665 info->delta = 0; 3666 } 3667 info->ts = ts; 3668 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3669 } 3670 3671 /* 3672 * If this is the first commit on the page, then it has the same 3673 * timestamp as the page itself. 3674 */ 3675 if (unlikely(!tail && !(info->add_timestamp & 3676 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3677 info->delta = 0; 3678 3679 /* We reserved something on the buffer */ 3680 3681 event = __rb_page_index(tail_page, tail); 3682 rb_update_event(cpu_buffer, event, info); 3683 3684 local_inc(&tail_page->entries); 3685 3686 /* 3687 * If this is the first commit on the page, then update 3688 * its timestamp. 3689 */ 3690 if (unlikely(!tail)) 3691 tail_page->page->time_stamp = info->ts; 3692 3693 /* account for these added bytes */ 3694 local_add(info->length, &cpu_buffer->entries_bytes); 3695 3696 return event; 3697 } 3698 3699 static __always_inline struct ring_buffer_event * 3700 rb_reserve_next_event(struct trace_buffer *buffer, 3701 struct ring_buffer_per_cpu *cpu_buffer, 3702 unsigned long length) 3703 { 3704 struct ring_buffer_event *event; 3705 struct rb_event_info info; 3706 int nr_loops = 0; 3707 int add_ts_default; 3708 3709 rb_start_commit(cpu_buffer); 3710 /* The commit page can not change after this */ 3711 3712 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3713 /* 3714 * Due to the ability to swap a cpu buffer from a buffer 3715 * it is possible it was swapped before we committed. 3716 * (committing stops a swap). We check for it here and 3717 * if it happened, we have to fail the write. 3718 */ 3719 barrier(); 3720 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 3721 local_dec(&cpu_buffer->committing); 3722 local_dec(&cpu_buffer->commits); 3723 return NULL; 3724 } 3725 #endif 3726 3727 info.length = rb_calculate_event_length(length); 3728 3729 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 3730 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 3731 info.length += RB_LEN_TIME_EXTEND; 3732 } else { 3733 add_ts_default = RB_ADD_STAMP_NONE; 3734 } 3735 3736 again: 3737 info.add_timestamp = add_ts_default; 3738 info.delta = 0; 3739 3740 /* 3741 * We allow for interrupts to reenter here and do a trace. 3742 * If one does, it will cause this original code to loop 3743 * back here. Even with heavy interrupts happening, this 3744 * should only happen a few times in a row. 
If this happens 3745 * 1000 times in a row, there must be either an interrupt 3746 * storm or we have something buggy. 3747 * Bail! 3748 */ 3749 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3750 goto out_fail; 3751 3752 event = __rb_reserve_next(cpu_buffer, &info); 3753 3754 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3755 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3756 info.length -= RB_LEN_TIME_EXTEND; 3757 goto again; 3758 } 3759 3760 if (likely(event)) 3761 return event; 3762 out_fail: 3763 rb_end_commit(cpu_buffer); 3764 return NULL; 3765 } 3766 3767 /** 3768 * ring_buffer_lock_reserve - reserve a part of the buffer 3769 * @buffer: the ring buffer to reserve from 3770 * @length: the length of the data to reserve (excluding event header) 3771 * 3772 * Returns a reserved event on the ring buffer to copy directly to. 3773 * The user of this interface will need to get the body to write into 3774 * and can use the ring_buffer_event_data() interface. 3775 * 3776 * The length is the length of the data needed, not the event length 3777 * which also includes the event header. 3778 * 3779 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3780 * If NULL is returned, then nothing has been allocated or locked. 3781 */ 3782 struct ring_buffer_event * 3783 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3784 { 3785 struct ring_buffer_per_cpu *cpu_buffer; 3786 struct ring_buffer_event *event; 3787 int cpu; 3788 3789 /* If we are tracing schedule, we don't want to recurse */ 3790 preempt_disable_notrace(); 3791 3792 if (unlikely(atomic_read(&buffer->record_disabled))) 3793 goto out; 3794 3795 cpu = raw_smp_processor_id(); 3796 3797 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3798 goto out; 3799 3800 cpu_buffer = buffer->buffers[cpu]; 3801 3802 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3803 goto out; 3804 3805 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3806 goto out; 3807 3808 if (unlikely(trace_recursive_lock(cpu_buffer))) 3809 goto out; 3810 3811 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3812 if (!event) 3813 goto out_unlock; 3814 3815 return event; 3816 3817 out_unlock: 3818 trace_recursive_unlock(cpu_buffer); 3819 out: 3820 preempt_enable_notrace(); 3821 return NULL; 3822 } 3823 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3824 3825 /* 3826 * Decrement the entries to the page that an event is on. 3827 * The event does not even need to exist, only the pointer 3828 * to the page it is on. This may only be called before the commit 3829 * takes place. 3830 */ 3831 static inline void 3832 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3833 struct ring_buffer_event *event) 3834 { 3835 unsigned long addr = (unsigned long)event; 3836 struct buffer_page *bpage = cpu_buffer->commit_page; 3837 struct buffer_page *start; 3838 3839 addr &= PAGE_MASK; 3840 3841 /* Do the likely case first */ 3842 if (likely(bpage->page == (void *)addr)) { 3843 local_dec(&bpage->entries); 3844 return; 3845 } 3846 3847 /* 3848 * Because the commit page may be on the reader page we 3849 * start with the next page and check the end loop there. 3850 */ 3851 rb_inc_page(&bpage); 3852 start = bpage; 3853 do { 3854 if (bpage->page == (void *)addr) { 3855 local_dec(&bpage->entries); 3856 return; 3857 } 3858 rb_inc_page(&bpage); 3859 } while (bpage != start); 3860 3861 /* commit not part of this buffer?? 
*/ 3862 RB_WARN_ON(cpu_buffer, 1); 3863 } 3864 3865 /** 3866 * ring_buffer_discard_commit - discard an event that has not been committed 3867 * @buffer: the ring buffer 3868 * @event: non committed event to discard 3869 * 3870 * Sometimes an event that is in the ring buffer needs to be ignored. 3871 * This function lets the user discard an event in the ring buffer 3872 * and then that event will not be read later. 3873 * 3874 * This function only works if it is called before the item has been 3875 * committed. It will try to free the event from the ring buffer 3876 * if another event has not been added behind it. 3877 * 3878 * If another event has been added behind it, it will set the event 3879 * up as discarded, and perform the commit. 3880 * 3881 * If this function is called, do not call ring_buffer_unlock_commit on 3882 * the event. 3883 */ 3884 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3885 struct ring_buffer_event *event) 3886 { 3887 struct ring_buffer_per_cpu *cpu_buffer; 3888 int cpu; 3889 3890 /* The event is discarded regardless */ 3891 rb_event_discard(event); 3892 3893 cpu = smp_processor_id(); 3894 cpu_buffer = buffer->buffers[cpu]; 3895 3896 /* 3897 * This must only be called if the event has not been 3898 * committed yet. Thus we can assume that preemption 3899 * is still disabled. 3900 */ 3901 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3902 3903 rb_decrement_entry(cpu_buffer, event); 3904 if (rb_try_to_discard(cpu_buffer, event)) 3905 goto out; 3906 3907 out: 3908 rb_end_commit(cpu_buffer); 3909 3910 trace_recursive_unlock(cpu_buffer); 3911 3912 preempt_enable_notrace(); 3913 3914 } 3915 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3916 3917 /** 3918 * ring_buffer_write - write data to the buffer without reserving 3919 * @buffer: The ring buffer to write to. 3920 * @length: The length of the data being written (excluding the event header) 3921 * @data: The data to write to the buffer. 3922 * 3923 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3924 * one function. If you already have the data to write to the buffer, it 3925 * may be easier to simply call this function. 3926 * 3927 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3928 * and not the length of the event which would hold the header. 
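 *
 * For example, a minimal sketch of logging a blob in one call (struct
 * my_event and its fields are hypothetical, not part of this file):
 *
 *	struct my_event ev = { .val = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(ev), &ev))
 *		;	/* recording is disabled or the event did not fit */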
3929 */ 3930 int ring_buffer_write(struct trace_buffer *buffer, 3931 unsigned long length, 3932 void *data) 3933 { 3934 struct ring_buffer_per_cpu *cpu_buffer; 3935 struct ring_buffer_event *event; 3936 void *body; 3937 int ret = -EBUSY; 3938 int cpu; 3939 3940 preempt_disable_notrace(); 3941 3942 if (atomic_read(&buffer->record_disabled)) 3943 goto out; 3944 3945 cpu = raw_smp_processor_id(); 3946 3947 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3948 goto out; 3949 3950 cpu_buffer = buffer->buffers[cpu]; 3951 3952 if (atomic_read(&cpu_buffer->record_disabled)) 3953 goto out; 3954 3955 if (length > BUF_MAX_DATA_SIZE) 3956 goto out; 3957 3958 if (unlikely(trace_recursive_lock(cpu_buffer))) 3959 goto out; 3960 3961 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3962 if (!event) 3963 goto out_unlock; 3964 3965 body = rb_event_data(event); 3966 3967 memcpy(body, data, length); 3968 3969 rb_commit(cpu_buffer); 3970 3971 rb_wakeups(buffer, cpu_buffer); 3972 3973 ret = 0; 3974 3975 out_unlock: 3976 trace_recursive_unlock(cpu_buffer); 3977 3978 out: 3979 preempt_enable_notrace(); 3980 3981 return ret; 3982 } 3983 EXPORT_SYMBOL_GPL(ring_buffer_write); 3984 3985 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3986 { 3987 struct buffer_page *reader = cpu_buffer->reader_page; 3988 struct buffer_page *head = rb_set_head_page(cpu_buffer); 3989 struct buffer_page *commit = cpu_buffer->commit_page; 3990 3991 /* In case of error, head will be NULL */ 3992 if (unlikely(!head)) 3993 return true; 3994 3995 /* Reader should exhaust content in reader page */ 3996 if (reader->read != rb_page_commit(reader)) 3997 return false; 3998 3999 /* 4000 * If writers are committing on the reader page, knowing all 4001 * committed content has been read, the ring buffer is empty. 4002 */ 4003 if (commit == reader) 4004 return true; 4005 4006 /* 4007 * If writers are committing on a page other than reader page 4008 * and head page, there should always be content to read. 4009 */ 4010 if (commit != head) 4011 return false; 4012 4013 /* 4014 * Writers are committing on the head page, we just need 4015 * to care about there're committed data, and the reader will 4016 * swap reader page with head page when it is to read data. 4017 */ 4018 return rb_page_commit(commit) == 0; 4019 } 4020 4021 /** 4022 * ring_buffer_record_disable - stop all writes into the buffer 4023 * @buffer: The ring buffer to stop writes to. 4024 * 4025 * This prevents all writes to the buffer. Any attempt to write 4026 * to the buffer after this will fail and return NULL. 4027 * 4028 * The caller should call synchronize_rcu() after this. 4029 */ 4030 void ring_buffer_record_disable(struct trace_buffer *buffer) 4031 { 4032 atomic_inc(&buffer->record_disabled); 4033 } 4034 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 4035 4036 /** 4037 * ring_buffer_record_enable - enable writes to the buffer 4038 * @buffer: The ring buffer to enable writes 4039 * 4040 * Note, multiple disables will need the same number of enables 4041 * to truly enable the writing (much like preempt_disable). 4042 */ 4043 void ring_buffer_record_enable(struct trace_buffer *buffer) 4044 { 4045 atomic_dec(&buffer->record_disabled); 4046 } 4047 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 4048 4049 /** 4050 * ring_buffer_record_off - stop all writes into the buffer 4051 * @buffer: The ring buffer to stop writes to. 4052 * 4053 * This prevents all writes to the buffer. Any attempt to write 4054 * to the buffer after this will fail and return NULL. 
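 *
 * For example, a sketch of freezing the trace while it is inspected and
 * then turning it back on (dump_buffer_somehow() is hypothetical):
 *
 *	ring_buffer_record_off(buffer);
 *	dump_buffer_somehow(buffer);
 *	ring_buffer_record_on(buffer);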
 *
 * This is different than ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
 */
void ring_buffer_record_off(struct trace_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	rd = atomic_read(&buffer->record_disabled);
	do {
		new_rd = rd | RB_BUFFER_OFF;
	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);

/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to.
 *
 * This enables all writes to the buffer that were disabled by
 * ring_buffer_record_off().
 *
 * This is different than ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
 * must be paired with a disable().
 */
void ring_buffer_record_on(struct trace_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	rd = atomic_read(&buffer->record_disabled);
	do {
		new_rd = rd & ~RB_BUFFER_OFF;
	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);

/**
 * ring_buffer_record_is_on - return true if the ring buffer can write
 * @buffer: The ring buffer to see if write is enabled
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
bool ring_buffer_record_is_on(struct trace_buffer *buffer)
{
	return !atomic_read(&buffer->record_disabled);
}

/**
 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
 * @buffer: The ring buffer to see if write is set enabled
 *
 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
 * Note that this does NOT mean it is in a writable state.
 *
 * It may return true when the ring buffer has been disabled by
 * ring_buffer_record_disable(), as that is a temporary disabling of
 * the ring buffer.
 */
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
{
	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_rcu() after this.
 */
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
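 *
 * For example, a sketch of temporarily stopping a single CPU's buffer
 * while the other CPUs keep tracing (inspect_cpu_buffer() is hypothetical):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_rcu();
 *	inspect_cpu_buffer(buffer, cpu);
 *	ring_buffer_record_enable_cpu(buffer, cpu);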
4151 */ 4152 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4153 { 4154 struct ring_buffer_per_cpu *cpu_buffer; 4155 4156 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4157 return; 4158 4159 cpu_buffer = buffer->buffers[cpu]; 4160 atomic_dec(&cpu_buffer->record_disabled); 4161 } 4162 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4163 4164 /* 4165 * The total entries in the ring buffer is the running counter 4166 * of entries entered into the ring buffer, minus the sum of 4167 * the entries read from the ring buffer and the number of 4168 * entries that were overwritten. 4169 */ 4170 static inline unsigned long 4171 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4172 { 4173 return local_read(&cpu_buffer->entries) - 4174 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4175 } 4176 4177 /** 4178 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4179 * @buffer: The ring buffer 4180 * @cpu: The per CPU buffer to read from. 4181 */ 4182 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4183 { 4184 unsigned long flags; 4185 struct ring_buffer_per_cpu *cpu_buffer; 4186 struct buffer_page *bpage; 4187 u64 ret = 0; 4188 4189 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4190 return 0; 4191 4192 cpu_buffer = buffer->buffers[cpu]; 4193 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4194 /* 4195 * if the tail is on reader_page, oldest time stamp is on the reader 4196 * page 4197 */ 4198 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4199 bpage = cpu_buffer->reader_page; 4200 else 4201 bpage = rb_set_head_page(cpu_buffer); 4202 if (bpage) 4203 ret = bpage->page->time_stamp; 4204 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4205 4206 return ret; 4207 } 4208 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4209 4210 /** 4211 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4212 * @buffer: The ring buffer 4213 * @cpu: The per CPU buffer to read from. 4214 */ 4215 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4216 { 4217 struct ring_buffer_per_cpu *cpu_buffer; 4218 unsigned long ret; 4219 4220 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4221 return 0; 4222 4223 cpu_buffer = buffer->buffers[cpu]; 4224 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4225 4226 return ret; 4227 } 4228 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4229 4230 /** 4231 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4232 * @buffer: The ring buffer 4233 * @cpu: The per CPU buffer to get the entries from. 4234 */ 4235 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4236 { 4237 struct ring_buffer_per_cpu *cpu_buffer; 4238 4239 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4240 return 0; 4241 4242 cpu_buffer = buffer->buffers[cpu]; 4243 4244 return rb_num_of_entries(cpu_buffer); 4245 } 4246 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4247 4248 /** 4249 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4250 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
4251 * @buffer: The ring buffer 4252 * @cpu: The per CPU buffer to get the number of overruns from 4253 */ 4254 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4255 { 4256 struct ring_buffer_per_cpu *cpu_buffer; 4257 unsigned long ret; 4258 4259 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4260 return 0; 4261 4262 cpu_buffer = buffer->buffers[cpu]; 4263 ret = local_read(&cpu_buffer->overrun); 4264 4265 return ret; 4266 } 4267 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4268 4269 /** 4270 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4271 * commits failing due to the buffer wrapping around while there are uncommitted 4272 * events, such as during an interrupt storm. 4273 * @buffer: The ring buffer 4274 * @cpu: The per CPU buffer to get the number of overruns from 4275 */ 4276 unsigned long 4277 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4278 { 4279 struct ring_buffer_per_cpu *cpu_buffer; 4280 unsigned long ret; 4281 4282 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4283 return 0; 4284 4285 cpu_buffer = buffer->buffers[cpu]; 4286 ret = local_read(&cpu_buffer->commit_overrun); 4287 4288 return ret; 4289 } 4290 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4291 4292 /** 4293 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4294 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4295 * @buffer: The ring buffer 4296 * @cpu: The per CPU buffer to get the number of overruns from 4297 */ 4298 unsigned long 4299 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4300 { 4301 struct ring_buffer_per_cpu *cpu_buffer; 4302 unsigned long ret; 4303 4304 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4305 return 0; 4306 4307 cpu_buffer = buffer->buffers[cpu]; 4308 ret = local_read(&cpu_buffer->dropped_events); 4309 4310 return ret; 4311 } 4312 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4313 4314 /** 4315 * ring_buffer_read_events_cpu - get the number of events successfully read 4316 * @buffer: The ring buffer 4317 * @cpu: The per CPU buffer to get the number of events read 4318 */ 4319 unsigned long 4320 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4321 { 4322 struct ring_buffer_per_cpu *cpu_buffer; 4323 4324 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4325 return 0; 4326 4327 cpu_buffer = buffer->buffers[cpu]; 4328 return cpu_buffer->read; 4329 } 4330 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4331 4332 /** 4333 * ring_buffer_entries - get the number of entries in a buffer 4334 * @buffer: The ring buffer 4335 * 4336 * Returns the total number of entries in the ring buffer 4337 * (all CPU entries) 4338 */ 4339 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4340 { 4341 struct ring_buffer_per_cpu *cpu_buffer; 4342 unsigned long entries = 0; 4343 int cpu; 4344 4345 /* if you care about this being correct, lock the buffer */ 4346 for_each_buffer_cpu(buffer, cpu) { 4347 cpu_buffer = buffer->buffers[cpu]; 4348 entries += rb_num_of_entries(cpu_buffer); 4349 } 4350 4351 return entries; 4352 } 4353 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4354 4355 /** 4356 * ring_buffer_overruns - get the number of overruns in buffer 4357 * @buffer: The ring buffer 4358 * 4359 * Returns the total number of overruns in the ring buffer 4360 * (all CPU entries) 4361 */ 4362 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4363 { 4364 struct ring_buffer_per_cpu *cpu_buffer; 4365 unsigned long overruns = 0; 4366 int cpu; 4367 4368 /* 
if you care about this being correct, lock the buffer */ 4369 for_each_buffer_cpu(buffer, cpu) { 4370 cpu_buffer = buffer->buffers[cpu]; 4371 overruns += local_read(&cpu_buffer->overrun); 4372 } 4373 4374 return overruns; 4375 } 4376 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4377 4378 static void rb_iter_reset(struct ring_buffer_iter *iter) 4379 { 4380 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4381 4382 /* Iterator usage is expected to have record disabled */ 4383 iter->head_page = cpu_buffer->reader_page; 4384 iter->head = cpu_buffer->reader_page->read; 4385 iter->next_event = iter->head; 4386 4387 iter->cache_reader_page = iter->head_page; 4388 iter->cache_read = cpu_buffer->read; 4389 iter->cache_pages_removed = cpu_buffer->pages_removed; 4390 4391 if (iter->head) { 4392 iter->read_stamp = cpu_buffer->read_stamp; 4393 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4394 } else { 4395 iter->read_stamp = iter->head_page->page->time_stamp; 4396 iter->page_stamp = iter->read_stamp; 4397 } 4398 } 4399 4400 /** 4401 * ring_buffer_iter_reset - reset an iterator 4402 * @iter: The iterator to reset 4403 * 4404 * Resets the iterator, so that it will start from the beginning 4405 * again. 4406 */ 4407 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4408 { 4409 struct ring_buffer_per_cpu *cpu_buffer; 4410 unsigned long flags; 4411 4412 if (!iter) 4413 return; 4414 4415 cpu_buffer = iter->cpu_buffer; 4416 4417 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4418 rb_iter_reset(iter); 4419 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4420 } 4421 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4422 4423 /** 4424 * ring_buffer_iter_empty - check if an iterator has no more to read 4425 * @iter: The iterator to check 4426 */ 4427 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4428 { 4429 struct ring_buffer_per_cpu *cpu_buffer; 4430 struct buffer_page *reader; 4431 struct buffer_page *head_page; 4432 struct buffer_page *commit_page; 4433 struct buffer_page *curr_commit_page; 4434 unsigned commit; 4435 u64 curr_commit_ts; 4436 u64 commit_ts; 4437 4438 cpu_buffer = iter->cpu_buffer; 4439 reader = cpu_buffer->reader_page; 4440 head_page = cpu_buffer->head_page; 4441 commit_page = cpu_buffer->commit_page; 4442 commit_ts = commit_page->page->time_stamp; 4443 4444 /* 4445 * When the writer goes across pages, it issues a cmpxchg which 4446 * is a mb(), which will synchronize with the rmb here. 
4447 * (see rb_tail_page_update()) 4448 */ 4449 smp_rmb(); 4450 commit = rb_page_commit(commit_page); 4451 /* We want to make sure that the commit page doesn't change */ 4452 smp_rmb(); 4453 4454 /* Make sure commit page didn't change */ 4455 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4456 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4457 4458 /* If the commit page changed, then there's more data */ 4459 if (curr_commit_page != commit_page || 4460 curr_commit_ts != commit_ts) 4461 return 0; 4462 4463 /* Still racy, as it may return a false positive, but that's OK */ 4464 return ((iter->head_page == commit_page && iter->head >= commit) || 4465 (iter->head_page == reader && commit_page == head_page && 4466 head_page->read == commit && 4467 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4468 } 4469 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4470 4471 static void 4472 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4473 struct ring_buffer_event *event) 4474 { 4475 u64 delta; 4476 4477 switch (event->type_len) { 4478 case RINGBUF_TYPE_PADDING: 4479 return; 4480 4481 case RINGBUF_TYPE_TIME_EXTEND: 4482 delta = rb_event_time_stamp(event); 4483 cpu_buffer->read_stamp += delta; 4484 return; 4485 4486 case RINGBUF_TYPE_TIME_STAMP: 4487 delta = rb_event_time_stamp(event); 4488 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4489 cpu_buffer->read_stamp = delta; 4490 return; 4491 4492 case RINGBUF_TYPE_DATA: 4493 cpu_buffer->read_stamp += event->time_delta; 4494 return; 4495 4496 default: 4497 RB_WARN_ON(cpu_buffer, 1); 4498 } 4499 } 4500 4501 static void 4502 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4503 struct ring_buffer_event *event) 4504 { 4505 u64 delta; 4506 4507 switch (event->type_len) { 4508 case RINGBUF_TYPE_PADDING: 4509 return; 4510 4511 case RINGBUF_TYPE_TIME_EXTEND: 4512 delta = rb_event_time_stamp(event); 4513 iter->read_stamp += delta; 4514 return; 4515 4516 case RINGBUF_TYPE_TIME_STAMP: 4517 delta = rb_event_time_stamp(event); 4518 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4519 iter->read_stamp = delta; 4520 return; 4521 4522 case RINGBUF_TYPE_DATA: 4523 iter->read_stamp += event->time_delta; 4524 return; 4525 4526 default: 4527 RB_WARN_ON(iter->cpu_buffer, 1); 4528 } 4529 } 4530 4531 static struct buffer_page * 4532 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4533 { 4534 struct buffer_page *reader = NULL; 4535 unsigned long overwrite; 4536 unsigned long flags; 4537 int nr_loops = 0; 4538 bool ret; 4539 4540 local_irq_save(flags); 4541 arch_spin_lock(&cpu_buffer->lock); 4542 4543 again: 4544 /* 4545 * This should normally only loop twice. But because the 4546 * start of the reader inserts an empty page, it causes 4547 * a case where we will loop three times. There should be no 4548 * reason to loop four times (that I know of). 
4549 */ 4550 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 4551 reader = NULL; 4552 goto out; 4553 } 4554 4555 reader = cpu_buffer->reader_page; 4556 4557 /* If there's more to read, return this page */ 4558 if (cpu_buffer->reader_page->read < rb_page_size(reader)) 4559 goto out; 4560 4561 /* Never should we have an index greater than the size */ 4562 if (RB_WARN_ON(cpu_buffer, 4563 cpu_buffer->reader_page->read > rb_page_size(reader))) 4564 goto out; 4565 4566 /* check if we caught up to the tail */ 4567 reader = NULL; 4568 if (cpu_buffer->commit_page == cpu_buffer->reader_page) 4569 goto out; 4570 4571 /* Don't bother swapping if the ring buffer is empty */ 4572 if (rb_num_of_entries(cpu_buffer) == 0) 4573 goto out; 4574 4575 /* 4576 * Reset the reader page to size zero. 4577 */ 4578 local_set(&cpu_buffer->reader_page->write, 0); 4579 local_set(&cpu_buffer->reader_page->entries, 0); 4580 local_set(&cpu_buffer->reader_page->page->commit, 0); 4581 cpu_buffer->reader_page->real_end = 0; 4582 4583 spin: 4584 /* 4585 * Splice the empty reader page into the list around the head. 4586 */ 4587 reader = rb_set_head_page(cpu_buffer); 4588 if (!reader) 4589 goto out; 4590 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 4591 cpu_buffer->reader_page->list.prev = reader->list.prev; 4592 4593 /* 4594 * cpu_buffer->pages just needs to point to the buffer, it 4595 * has no specific buffer page to point to. Lets move it out 4596 * of our way so we don't accidentally swap it. 4597 */ 4598 cpu_buffer->pages = reader->list.prev; 4599 4600 /* The reader page will be pointing to the new head */ 4601 rb_set_list_to_head(&cpu_buffer->reader_page->list); 4602 4603 /* 4604 * We want to make sure we read the overruns after we set up our 4605 * pointers to the next object. The writer side does a 4606 * cmpxchg to cross pages which acts as the mb on the writer 4607 * side. Note, the reader will constantly fail the swap 4608 * while the writer is updating the pointers, so this 4609 * guarantees that the overwrite recorded here is the one we 4610 * want to compare with the last_overrun. 4611 */ 4612 smp_mb(); 4613 overwrite = local_read(&(cpu_buffer->overrun)); 4614 4615 /* 4616 * Here's the tricky part. 4617 * 4618 * We need to move the pointer past the header page. 4619 * But we can only do that if a writer is not currently 4620 * moving it. The page before the header page has the 4621 * flag bit '1' set if it is pointing to the page we want. 4622 * but if the writer is in the process of moving it 4623 * than it will be '2' or already moved '0'. 4624 */ 4625 4626 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 4627 4628 /* 4629 * If we did not convert it, then we must try again. 4630 */ 4631 if (!ret) 4632 goto spin; 4633 4634 /* 4635 * Yay! We succeeded in replacing the page. 4636 * 4637 * Now make the new head point back to the reader page. 
4638 */ 4639 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4640 rb_inc_page(&cpu_buffer->head_page); 4641 4642 local_inc(&cpu_buffer->pages_read); 4643 4644 /* Finally update the reader page to the new head */ 4645 cpu_buffer->reader_page = reader; 4646 cpu_buffer->reader_page->read = 0; 4647 4648 if (overwrite != cpu_buffer->last_overrun) { 4649 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4650 cpu_buffer->last_overrun = overwrite; 4651 } 4652 4653 goto again; 4654 4655 out: 4656 /* Update the read_stamp on the first event */ 4657 if (reader && reader->read == 0) 4658 cpu_buffer->read_stamp = reader->page->time_stamp; 4659 4660 arch_spin_unlock(&cpu_buffer->lock); 4661 local_irq_restore(flags); 4662 4663 /* 4664 * The writer has preempt disable, wait for it. But not forever 4665 * Although, 1 second is pretty much "forever" 4666 */ 4667 #define USECS_WAIT 1000000 4668 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4669 /* If the write is past the end of page, a writer is still updating it */ 4670 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4671 break; 4672 4673 udelay(1); 4674 4675 /* Get the latest version of the reader write value */ 4676 smp_rmb(); 4677 } 4678 4679 /* The writer is not moving forward? Something is wrong */ 4680 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4681 reader = NULL; 4682 4683 /* 4684 * Make sure we see any padding after the write update 4685 * (see rb_reset_tail()). 4686 * 4687 * In addition, a writer may be writing on the reader page 4688 * if the page has not been fully filled, so the read barrier 4689 * is also needed to make sure we see the content of what is 4690 * committed by the writer (see rb_set_commit_to_write()). 4691 */ 4692 smp_rmb(); 4693 4694 4695 return reader; 4696 } 4697 4698 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4699 { 4700 struct ring_buffer_event *event; 4701 struct buffer_page *reader; 4702 unsigned length; 4703 4704 reader = rb_get_reader_page(cpu_buffer); 4705 4706 /* This function should not be called when buffer is empty */ 4707 if (RB_WARN_ON(cpu_buffer, !reader)) 4708 return; 4709 4710 event = rb_reader_event(cpu_buffer); 4711 4712 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4713 cpu_buffer->read++; 4714 4715 rb_update_read_stamp(cpu_buffer, event); 4716 4717 length = rb_event_length(event); 4718 cpu_buffer->reader_page->read += length; 4719 } 4720 4721 static void rb_advance_iter(struct ring_buffer_iter *iter) 4722 { 4723 struct ring_buffer_per_cpu *cpu_buffer; 4724 4725 cpu_buffer = iter->cpu_buffer; 4726 4727 /* If head == next_event then we need to jump to the next event */ 4728 if (iter->head == iter->next_event) { 4729 /* If the event gets overwritten again, there's nothing to do */ 4730 if (rb_iter_head_event(iter) == NULL) 4731 return; 4732 } 4733 4734 iter->head = iter->next_event; 4735 4736 /* 4737 * Check if we are at the end of the buffer. 
4738 */ 4739 if (iter->next_event >= rb_page_size(iter->head_page)) { 4740 /* discarded commits can make the page empty */ 4741 if (iter->head_page == cpu_buffer->commit_page) 4742 return; 4743 rb_inc_iter(iter); 4744 return; 4745 } 4746 4747 rb_update_iter_read_stamp(iter, iter->event); 4748 } 4749 4750 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4751 { 4752 return cpu_buffer->lost_events; 4753 } 4754 4755 static struct ring_buffer_event * 4756 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4757 unsigned long *lost_events) 4758 { 4759 struct ring_buffer_event *event; 4760 struct buffer_page *reader; 4761 int nr_loops = 0; 4762 4763 if (ts) 4764 *ts = 0; 4765 again: 4766 /* 4767 * We repeat when a time extend is encountered. 4768 * Since the time extend is always attached to a data event, 4769 * we should never loop more than once. 4770 * (We never hit the following condition more than twice). 4771 */ 4772 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4773 return NULL; 4774 4775 reader = rb_get_reader_page(cpu_buffer); 4776 if (!reader) 4777 return NULL; 4778 4779 event = rb_reader_event(cpu_buffer); 4780 4781 switch (event->type_len) { 4782 case RINGBUF_TYPE_PADDING: 4783 if (rb_null_event(event)) 4784 RB_WARN_ON(cpu_buffer, 1); 4785 /* 4786 * Because the writer could be discarding every 4787 * event it creates (which would probably be bad) 4788 * if we were to go back to "again" then we may never 4789 * catch up, and will trigger the warn on, or lock 4790 * the box. Return the padding, and we will release 4791 * the current locks, and try again. 4792 */ 4793 return event; 4794 4795 case RINGBUF_TYPE_TIME_EXTEND: 4796 /* Internal data, OK to advance */ 4797 rb_advance_reader(cpu_buffer); 4798 goto again; 4799 4800 case RINGBUF_TYPE_TIME_STAMP: 4801 if (ts) { 4802 *ts = rb_event_time_stamp(event); 4803 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4804 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4805 cpu_buffer->cpu, ts); 4806 } 4807 /* Internal data, OK to advance */ 4808 rb_advance_reader(cpu_buffer); 4809 goto again; 4810 4811 case RINGBUF_TYPE_DATA: 4812 if (ts && !(*ts)) { 4813 *ts = cpu_buffer->read_stamp + event->time_delta; 4814 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4815 cpu_buffer->cpu, ts); 4816 } 4817 if (lost_events) 4818 *lost_events = rb_lost_events(cpu_buffer); 4819 return event; 4820 4821 default: 4822 RB_WARN_ON(cpu_buffer, 1); 4823 } 4824 4825 return NULL; 4826 } 4827 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4828 4829 static struct ring_buffer_event * 4830 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4831 { 4832 struct trace_buffer *buffer; 4833 struct ring_buffer_per_cpu *cpu_buffer; 4834 struct ring_buffer_event *event; 4835 int nr_loops = 0; 4836 4837 if (ts) 4838 *ts = 0; 4839 4840 cpu_buffer = iter->cpu_buffer; 4841 buffer = cpu_buffer->buffer; 4842 4843 /* 4844 * Check if someone performed a consuming read to the buffer 4845 * or removed some pages from the buffer. In these cases, 4846 * iterator was invalidated and we need to reset it. 4847 */ 4848 if (unlikely(iter->cache_read != cpu_buffer->read || 4849 iter->cache_reader_page != cpu_buffer->reader_page || 4850 iter->cache_pages_removed != cpu_buffer->pages_removed)) 4851 rb_iter_reset(iter); 4852 4853 again: 4854 if (ring_buffer_iter_empty(iter)) 4855 return NULL; 4856 4857 /* 4858 * As the writer can mess with what the iterator is trying 4859 * to read, just give up if we fail to get an event after 4860 * three tries. 
The iterator is not as reliable when reading
 * the ring buffer with an active write as the consumer is.
 * Do not warn if the three failures are reached.
 */
	if (++nr_loops > 3)
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= rb_page_size(iter->head_page)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);
	if (!event)
		goto again;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		if (ts) {
			*ts = rb_event_time_stamp(event);
			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts && !(*ts)) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		RB_WARN_ON(cpu_buffer, 1);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
	if (likely(!in_nmi())) {
		raw_spin_lock(&cpu_buffer->reader_lock);
		return true;
	}

	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * trylock must be used to prevent a deadlock if the NMI
	 * preempted a task that holds the ring buffer locks. If
	 * we get the lock then all is fine, if not, then continue
	 * to do the read, but this can corrupt the ring buffer,
	 * so it must be permanently disabled from future writes.
	 * Reading from NMI is a one-shot deal.
	 */
	if (raw_spin_trylock(&cpu_buffer->reader_lock))
		return true;

	/* Continue without locking, but disable the ring buffer */
	atomic_inc(&cpu_buffer->record_disabled);
	return false;
}

static inline void
rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
{
	if (likely(locked))
		raw_spin_unlock(&cpu_buffer->reader_lock);
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
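 *
 * For example, a sketch that looks at the next event without consuming it
 * (process_event() is hypothetical):
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		process_event(ring_buffer_event_data(event), ts);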
4960 */ 4961 struct ring_buffer_event * 4962 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, 4963 unsigned long *lost_events) 4964 { 4965 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4966 struct ring_buffer_event *event; 4967 unsigned long flags; 4968 bool dolock; 4969 4970 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4971 return NULL; 4972 4973 again: 4974 local_irq_save(flags); 4975 dolock = rb_reader_lock(cpu_buffer); 4976 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 4977 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4978 rb_advance_reader(cpu_buffer); 4979 rb_reader_unlock(cpu_buffer, dolock); 4980 local_irq_restore(flags); 4981 4982 if (event && event->type_len == RINGBUF_TYPE_PADDING) 4983 goto again; 4984 4985 return event; 4986 } 4987 4988 /** ring_buffer_iter_dropped - report if there are dropped events 4989 * @iter: The ring buffer iterator 4990 * 4991 * Returns true if there was dropped events since the last peek. 4992 */ 4993 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter) 4994 { 4995 bool ret = iter->missed_events != 0; 4996 4997 iter->missed_events = 0; 4998 return ret; 4999 } 5000 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped); 5001 5002 /** 5003 * ring_buffer_iter_peek - peek at the next event to be read 5004 * @iter: The ring buffer iterator 5005 * @ts: The timestamp counter of this event. 5006 * 5007 * This will return the event that will be read next, but does 5008 * not increment the iterator. 5009 */ 5010 struct ring_buffer_event * 5011 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 5012 { 5013 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5014 struct ring_buffer_event *event; 5015 unsigned long flags; 5016 5017 again: 5018 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5019 event = rb_iter_peek(iter, ts); 5020 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5021 5022 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5023 goto again; 5024 5025 return event; 5026 } 5027 5028 /** 5029 * ring_buffer_consume - return an event and consume it 5030 * @buffer: The ring buffer to get the next event from 5031 * @cpu: the cpu to read the buffer from 5032 * @ts: a variable to store the timestamp (may be NULL) 5033 * @lost_events: a variable to store if events were lost (may be NULL) 5034 * 5035 * Returns the next event in the ring buffer, and that event is consumed. 5036 * Meaning, that sequential reads will keep returning a different event, 5037 * and eventually empty the ring buffer if the producer is slower. 
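 *
 * For example, a sketch of draining one CPU's buffer (process_event() is
 * hypothetical):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
 *		process_event(ring_buffer_event_data(event), ts);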
 */
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	bool dolock;

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);

/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 * @flags: gfp flags to use for memory allocation
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kzalloc(sizeof(*iter), flags);
	if (!iter)
		return NULL;

	iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
	if (!iter->event) {
		kfree(iter);
		return NULL;
	}

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->resize_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
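 *
 * Putting the whole non consuming sequence together, roughly (error
 * handling omitted):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);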
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	/*
	 * Ring buffer is disabled from recording, here's a good place
	 * to check the integrity of the ring buffer.
	 * Must prevent readers from trying to read, as the check
	 * clears the HEAD page and readers require it.
	 */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_check_pages(cpu_buffer);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->resize_disabled);
	kfree(iter->event);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_iter_advance - advance the iterator to the next location
 * @iter: The ring buffer iterator
 *
 * Move the location of the iterator such that the next read will
 * be the next location of the iterator.
 */
void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	rb_advance_iter(iter);

	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
 */
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
	/*
	 * Earlier, this method returned
	 *	BUF_PAGE_SIZE * buffer->nr_pages
	 * Since the nr_pages field is now removed, we have converted this to
	 * return the per cpu buffer value.
5233 */ 5234 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5235 return 0; 5236 5237 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5238 } 5239 EXPORT_SYMBOL_GPL(ring_buffer_size); 5240 5241 static void rb_clear_buffer_page(struct buffer_page *page) 5242 { 5243 local_set(&page->write, 0); 5244 local_set(&page->entries, 0); 5245 rb_init_page(page->page); 5246 page->read = 0; 5247 } 5248 5249 static void 5250 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5251 { 5252 struct buffer_page *page; 5253 5254 rb_head_page_deactivate(cpu_buffer); 5255 5256 cpu_buffer->head_page 5257 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5258 rb_clear_buffer_page(cpu_buffer->head_page); 5259 list_for_each_entry(page, cpu_buffer->pages, list) { 5260 rb_clear_buffer_page(page); 5261 } 5262 5263 cpu_buffer->tail_page = cpu_buffer->head_page; 5264 cpu_buffer->commit_page = cpu_buffer->head_page; 5265 5266 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5267 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5268 rb_clear_buffer_page(cpu_buffer->reader_page); 5269 5270 local_set(&cpu_buffer->entries_bytes, 0); 5271 local_set(&cpu_buffer->overrun, 0); 5272 local_set(&cpu_buffer->commit_overrun, 0); 5273 local_set(&cpu_buffer->dropped_events, 0); 5274 local_set(&cpu_buffer->entries, 0); 5275 local_set(&cpu_buffer->committing, 0); 5276 local_set(&cpu_buffer->commits, 0); 5277 local_set(&cpu_buffer->pages_touched, 0); 5278 local_set(&cpu_buffer->pages_lost, 0); 5279 local_set(&cpu_buffer->pages_read, 0); 5280 cpu_buffer->last_pages_touch = 0; 5281 cpu_buffer->shortest_full = 0; 5282 cpu_buffer->read = 0; 5283 cpu_buffer->read_bytes = 0; 5284 5285 rb_time_set(&cpu_buffer->write_stamp, 0); 5286 rb_time_set(&cpu_buffer->before_stamp, 0); 5287 5288 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5289 5290 cpu_buffer->lost_events = 0; 5291 cpu_buffer->last_overrun = 0; 5292 5293 rb_head_page_activate(cpu_buffer); 5294 cpu_buffer->pages_removed = 0; 5295 } 5296 5297 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5298 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5299 { 5300 unsigned long flags; 5301 5302 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5303 5304 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5305 goto out; 5306 5307 arch_spin_lock(&cpu_buffer->lock); 5308 5309 rb_reset_cpu(cpu_buffer); 5310 5311 arch_spin_unlock(&cpu_buffer->lock); 5312 5313 out: 5314 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5315 } 5316 5317 /** 5318 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5319 * @buffer: The ring buffer to reset a per cpu buffer of 5320 * @cpu: The CPU buffer to be reset 5321 */ 5322 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5323 { 5324 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5325 5326 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5327 return; 5328 5329 /* prevent another thread from changing buffer sizes */ 5330 mutex_lock(&buffer->mutex); 5331 5332 atomic_inc(&cpu_buffer->resize_disabled); 5333 atomic_inc(&cpu_buffer->record_disabled); 5334 5335 /* Make sure all commits have finished */ 5336 synchronize_rcu(); 5337 5338 reset_disabled_cpu_buffer(cpu_buffer); 5339 5340 atomic_dec(&cpu_buffer->record_disabled); 5341 atomic_dec(&cpu_buffer->resize_disabled); 5342 5343 mutex_unlock(&buffer->mutex); 5344 } 5345 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5346 5347 /* Flag to ensure proper resetting of atomic variables */ 5348 
#define RESET_BIT (1 << 30) 5349 5350 /** 5351 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5352 * @buffer: The ring buffer to reset a per cpu buffer of 5353 */ 5354 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5355 { 5356 struct ring_buffer_per_cpu *cpu_buffer; 5357 int cpu; 5358 5359 /* prevent another thread from changing buffer sizes */ 5360 mutex_lock(&buffer->mutex); 5361 5362 for_each_online_buffer_cpu(buffer, cpu) { 5363 cpu_buffer = buffer->buffers[cpu]; 5364 5365 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); 5366 atomic_inc(&cpu_buffer->record_disabled); 5367 } 5368 5369 /* Make sure all commits have finished */ 5370 synchronize_rcu(); 5371 5372 for_each_buffer_cpu(buffer, cpu) { 5373 cpu_buffer = buffer->buffers[cpu]; 5374 5375 /* 5376 * If a CPU came online during the synchronize_rcu(), then 5377 * ignore it. 5378 */ 5379 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) 5380 continue; 5381 5382 reset_disabled_cpu_buffer(cpu_buffer); 5383 5384 atomic_dec(&cpu_buffer->record_disabled); 5385 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); 5386 } 5387 5388 mutex_unlock(&buffer->mutex); 5389 } 5390 5391 /** 5392 * ring_buffer_reset - reset a ring buffer 5393 * @buffer: The ring buffer to reset all cpu buffers 5394 */ 5395 void ring_buffer_reset(struct trace_buffer *buffer) 5396 { 5397 struct ring_buffer_per_cpu *cpu_buffer; 5398 int cpu; 5399 5400 /* prevent another thread from changing buffer sizes */ 5401 mutex_lock(&buffer->mutex); 5402 5403 for_each_buffer_cpu(buffer, cpu) { 5404 cpu_buffer = buffer->buffers[cpu]; 5405 5406 atomic_inc(&cpu_buffer->resize_disabled); 5407 atomic_inc(&cpu_buffer->record_disabled); 5408 } 5409 5410 /* Make sure all commits have finished */ 5411 synchronize_rcu(); 5412 5413 for_each_buffer_cpu(buffer, cpu) { 5414 cpu_buffer = buffer->buffers[cpu]; 5415 5416 reset_disabled_cpu_buffer(cpu_buffer); 5417 5418 atomic_dec(&cpu_buffer->record_disabled); 5419 atomic_dec(&cpu_buffer->resize_disabled); 5420 } 5421 5422 mutex_unlock(&buffer->mutex); 5423 } 5424 EXPORT_SYMBOL_GPL(ring_buffer_reset); 5425 5426 /** 5427 * ring_buffer_empty - is the ring buffer empty? 5428 * @buffer: The ring buffer to test 5429 */ 5430 bool ring_buffer_empty(struct trace_buffer *buffer) 5431 { 5432 struct ring_buffer_per_cpu *cpu_buffer; 5433 unsigned long flags; 5434 bool dolock; 5435 bool ret; 5436 int cpu; 5437 5438 /* yes this is racy, but if you don't like the race, lock the buffer */ 5439 for_each_buffer_cpu(buffer, cpu) { 5440 cpu_buffer = buffer->buffers[cpu]; 5441 local_irq_save(flags); 5442 dolock = rb_reader_lock(cpu_buffer); 5443 ret = rb_per_cpu_empty(cpu_buffer); 5444 rb_reader_unlock(cpu_buffer, dolock); 5445 local_irq_restore(flags); 5446 5447 if (!ret) 5448 return false; 5449 } 5450 5451 return true; 5452 } 5453 EXPORT_SYMBOL_GPL(ring_buffer_empty); 5454 5455 /** 5456 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 
5457 * @buffer: The ring buffer 5458 * @cpu: The CPU buffer to test 5459 */ 5460 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5461 { 5462 struct ring_buffer_per_cpu *cpu_buffer; 5463 unsigned long flags; 5464 bool dolock; 5465 bool ret; 5466 5467 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5468 return true; 5469 5470 cpu_buffer = buffer->buffers[cpu]; 5471 local_irq_save(flags); 5472 dolock = rb_reader_lock(cpu_buffer); 5473 ret = rb_per_cpu_empty(cpu_buffer); 5474 rb_reader_unlock(cpu_buffer, dolock); 5475 local_irq_restore(flags); 5476 5477 return ret; 5478 } 5479 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5480 5481 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5482 /** 5483 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5484 * @buffer_a: One buffer to swap with 5485 * @buffer_b: The other buffer to swap with 5486 * @cpu: the CPU of the buffers to swap 5487 * 5488 * This function is useful for tracers that want to take a "snapshot" 5489 * of a CPU buffer and has another back up buffer lying around. 5490 * it is expected that the tracer handles the cpu buffer not being 5491 * used at the moment. 5492 */ 5493 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5494 struct trace_buffer *buffer_b, int cpu) 5495 { 5496 struct ring_buffer_per_cpu *cpu_buffer_a; 5497 struct ring_buffer_per_cpu *cpu_buffer_b; 5498 int ret = -EINVAL; 5499 5500 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5501 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5502 goto out; 5503 5504 cpu_buffer_a = buffer_a->buffers[cpu]; 5505 cpu_buffer_b = buffer_b->buffers[cpu]; 5506 5507 /* At least make sure the two buffers are somewhat the same */ 5508 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5509 goto out; 5510 5511 ret = -EAGAIN; 5512 5513 if (atomic_read(&buffer_a->record_disabled)) 5514 goto out; 5515 5516 if (atomic_read(&buffer_b->record_disabled)) 5517 goto out; 5518 5519 if (atomic_read(&cpu_buffer_a->record_disabled)) 5520 goto out; 5521 5522 if (atomic_read(&cpu_buffer_b->record_disabled)) 5523 goto out; 5524 5525 /* 5526 * We can't do a synchronize_rcu here because this 5527 * function can be called in atomic context. 5528 * Normally this will be called from the same CPU as cpu. 5529 * If not it's up to the caller to protect this. 5530 */ 5531 atomic_inc(&cpu_buffer_a->record_disabled); 5532 atomic_inc(&cpu_buffer_b->record_disabled); 5533 5534 ret = -EBUSY; 5535 if (local_read(&cpu_buffer_a->committing)) 5536 goto out_dec; 5537 if (local_read(&cpu_buffer_b->committing)) 5538 goto out_dec; 5539 5540 /* 5541 * When resize is in progress, we cannot swap it because 5542 * it will mess the state of the cpu buffer. 5543 */ 5544 if (atomic_read(&buffer_a->resizing)) 5545 goto out_dec; 5546 if (atomic_read(&buffer_b->resizing)) 5547 goto out_dec; 5548 5549 buffer_a->buffers[cpu] = cpu_buffer_b; 5550 buffer_b->buffers[cpu] = cpu_buffer_a; 5551 5552 cpu_buffer_b->buffer = buffer_a; 5553 cpu_buffer_a->buffer = buffer_b; 5554 5555 ret = 0; 5556 5557 out_dec: 5558 atomic_dec(&cpu_buffer_a->record_disabled); 5559 atomic_dec(&cpu_buffer_b->record_disabled); 5560 out: 5561 return ret; 5562 } 5563 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5564 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5565 5566 /** 5567 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5568 * @buffer: the buffer to allocate for. 5569 * @cpu: the cpu buffer to allocate. 5570 * 5571 * This function is used in conjunction with ring_buffer_read_page. 
5572 * When reading a full page from the ring buffer, these functions 5573 * can be used to speed up the process. The calling function should 5574 * allocate a few pages first with this function. Then when it 5575 * needs to get pages from the ring buffer, it passes the result 5576 * of this function into ring_buffer_read_page, which will swap 5577 * the page that was allocated, with the read page of the buffer. 5578 * 5579 * Returns: 5580 * The page allocated, or ERR_PTR 5581 */ 5582 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 5583 { 5584 struct ring_buffer_per_cpu *cpu_buffer; 5585 struct buffer_data_page *bpage = NULL; 5586 unsigned long flags; 5587 struct page *page; 5588 5589 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5590 return ERR_PTR(-ENODEV); 5591 5592 cpu_buffer = buffer->buffers[cpu]; 5593 local_irq_save(flags); 5594 arch_spin_lock(&cpu_buffer->lock); 5595 5596 if (cpu_buffer->free_page) { 5597 bpage = cpu_buffer->free_page; 5598 cpu_buffer->free_page = NULL; 5599 } 5600 5601 arch_spin_unlock(&cpu_buffer->lock); 5602 local_irq_restore(flags); 5603 5604 if (bpage) 5605 goto out; 5606 5607 page = alloc_pages_node(cpu_to_node(cpu), 5608 GFP_KERNEL | __GFP_NORETRY, 0); 5609 if (!page) 5610 return ERR_PTR(-ENOMEM); 5611 5612 bpage = page_address(page); 5613 5614 out: 5615 rb_init_page(bpage); 5616 5617 return bpage; 5618 } 5619 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 5620 5621 /** 5622 * ring_buffer_free_read_page - free an allocated read page 5623 * @buffer: the buffer the page was allocate for 5624 * @cpu: the cpu buffer the page came from 5625 * @data: the page to free 5626 * 5627 * Free a page allocated from ring_buffer_alloc_read_page. 5628 */ 5629 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) 5630 { 5631 struct ring_buffer_per_cpu *cpu_buffer; 5632 struct buffer_data_page *bpage = data; 5633 struct page *page = virt_to_page(bpage); 5634 unsigned long flags; 5635 5636 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) 5637 return; 5638 5639 cpu_buffer = buffer->buffers[cpu]; 5640 5641 /* If the page is still in use someplace else, we can't reuse it */ 5642 if (page_ref_count(page) > 1) 5643 goto out; 5644 5645 local_irq_save(flags); 5646 arch_spin_lock(&cpu_buffer->lock); 5647 5648 if (!cpu_buffer->free_page) { 5649 cpu_buffer->free_page = bpage; 5650 bpage = NULL; 5651 } 5652 5653 arch_spin_unlock(&cpu_buffer->lock); 5654 local_irq_restore(flags); 5655 5656 out: 5657 free_page((unsigned long)bpage); 5658 } 5659 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 5660 5661 /** 5662 * ring_buffer_read_page - extract a page from the ring buffer 5663 * @buffer: buffer to extract from 5664 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 5665 * @len: amount to extract 5666 * @cpu: the cpu of the buffer to extract 5667 * @full: should the extraction only happen when the page is full. 5668 * 5669 * This function will pull out a page from the ring buffer and consume it. 5670 * @data_page must be the address of the variable that was returned 5671 * from ring_buffer_alloc_read_page. This is because the page might be used 5672 * to swap with a page in the ring buffer. 
5673 * 5674 * for example: 5675 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5676 * if (IS_ERR(rpage)) 5677 * return PTR_ERR(rpage); 5678 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5679 * if (ret >= 0) 5680 * process_page(rpage, ret); 5681 * 5682 * When @full is set, the function will not return true unless 5683 * the writer is off the reader page. 5684 * 5685 * Note: it is up to the calling functions to handle sleeps and wakeups. 5686 * The ring buffer can be used anywhere in the kernel and can not 5687 * blindly call wake_up. The layer that uses the ring buffer must be 5688 * responsible for that. 5689 * 5690 * Returns: 5691 * >=0 if data has been transferred, returns the offset of consumed data. 5692 * <0 if no data has been transferred. 5693 */ 5694 int ring_buffer_read_page(struct trace_buffer *buffer, 5695 void **data_page, size_t len, int cpu, int full) 5696 { 5697 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5698 struct ring_buffer_event *event; 5699 struct buffer_data_page *bpage; 5700 struct buffer_page *reader; 5701 unsigned long missed_events; 5702 unsigned long flags; 5703 unsigned int commit; 5704 unsigned int read; 5705 u64 save_timestamp; 5706 int ret = -1; 5707 5708 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5709 goto out; 5710 5711 /* 5712 * If len is not big enough to hold the page header, then 5713 * we can not copy anything. 5714 */ 5715 if (len <= BUF_PAGE_HDR_SIZE) 5716 goto out; 5717 5718 len -= BUF_PAGE_HDR_SIZE; 5719 5720 if (!data_page) 5721 goto out; 5722 5723 bpage = *data_page; 5724 if (!bpage) 5725 goto out; 5726 5727 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5728 5729 reader = rb_get_reader_page(cpu_buffer); 5730 if (!reader) 5731 goto out_unlock; 5732 5733 event = rb_reader_event(cpu_buffer); 5734 5735 read = reader->read; 5736 commit = rb_page_commit(reader); 5737 5738 /* Check if any events were dropped */ 5739 missed_events = cpu_buffer->lost_events; 5740 5741 /* 5742 * If this page has been partially read or 5743 * if len is not big enough to read the rest of the page or 5744 * a writer is still on the page, then 5745 * we must copy the data from the page to the buffer. 5746 * Otherwise, we can simply swap the page with the one passed in. 5747 */ 5748 if (read || (len < (commit - read)) || 5749 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5750 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5751 unsigned int rpos = read; 5752 unsigned int pos = 0; 5753 unsigned int size; 5754 5755 /* 5756 * If a full page is expected, this can still be returned 5757 * if there's been a previous partial read and the 5758 * rest of the page can be read and the commit page is off 5759 * the reader page. 5760 */ 5761 if (full && 5762 (!read || (len < (commit - read)) || 5763 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5764 goto out_unlock; 5765 5766 if (len > (commit - read)) 5767 len = (commit - read); 5768 5769 /* Always keep the time extend and data together */ 5770 size = rb_event_ts_length(event); 5771 5772 if (len < size) 5773 goto out_unlock; 5774 5775 /* save the current timestamp, since the user will need it */ 5776 save_timestamp = cpu_buffer->read_stamp; 5777 5778 /* Need to copy one event at a time */ 5779 do { 5780 /* We need the size of one event, because 5781 * rb_advance_reader only advances by one event, 5782 * whereas rb_event_ts_length may include the size of 5783 * one or two events. 
			 * We have already ensured there's enough space if this
			 * is a time extend. */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size.
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/* If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);

/*
 * We only allocate new buffers, never free them if the CPU goes down.
 * If we were to free the buffer, then the user would lose any trace that was in
 * the buffer.
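 *
 * A typical caller registers this as a CPU hotplug "multi instance" prepare
 * callback and then adds each ring buffer as an instance. A rough,
 * illustrative sketch (the exact hotplug state and error handling belong to
 * the caller, not this file):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *	if (!ret)
 *		ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE,
 *					       &buffer->node);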
 */
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct trace_buffer *buffer;
	long nr_pages_same;
	int cpu_i;
	unsigned long nr_pages;

	buffer = container_of(node, struct trace_buffer, node);
	if (cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	nr_pages = 0;
	nr_pages_same = 1;
	/* check if all cpu sizes are same */
	for_each_buffer_cpu(buffer, cpu_i) {
		/* fill in the size from first enabled cpu */
		if (nr_pages == 0)
			nr_pages = buffer->buffers[cpu_i]->nr_pages;
		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
			nr_pages_same = 0;
			break;
		}
	}
	/* allocate minimum pages, user can later expand it */
	if (!nr_pages_same)
		nr_pages = 2;
	buffer->buffers[cpu] =
		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu]) {
		WARN(1, "failed to allocate ring buffer on CPU %u\n",
		     cpu);
		return -ENOMEM;
	}
	smp_wmb();
	cpumask_set_cpu(cpu, buffer->cpumask);
	return 0;
}

#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct trace_buffer *buffer;
	unsigned long events;
	unsigned long bytes_written;
	unsigned long bytes_alloc;
	unsigned long bytes_dropped;
	unsigned long events_nested;
	unsigned long bytes_written_nested;
	unsigned long bytes_alloc_nested;
	unsigned long bytes_dropped_nested;
	int min_size_nested;
	int max_size_nested;
	int max_size;
	int min_size;
	int cpu;
	int cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};

static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes different than what is written */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e, to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer);

	return 0;
}

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {

		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non preempt, let others run */
		schedule();
	}

	return 0;
}

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct trace_buffer *buffer;
	int cpu;
	int ret = 0;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
		return 0;
	}

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
						     cpu, "rbtester/%u");
		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
			pr_cont("FAILED\n");
			ret = PTR_ERR(rb_threads[cpu]);
			goto out_free;
		}
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(IS_ERR(rb_hammer))) {
		pr_cont("FAILED\n");
		ret = PTR_ERR(rb_hammer);
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show buffer is enabled before setting rb_test_started.
	 * Yes there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

 out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	/* Report! */
	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info(" events: %ld\n", total_events);
		pr_info(" dropped bytes: %ld\n", total_dropped);
		pr_info(" alloced bytes: %ld\n", total_alloc);
		pr_info(" written bytes: %ld\n", total_written);
		pr_info(" biggest event: %d\n", big_event_size);
		pr_info(" smallest event: %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected: %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info(" read events: %ld\n", total_read);
		pr_info(" lost events: %ld\n", total_lost);
		pr_info(" total events: %ld\n", total_lost + total_read);
		pr_info(" recorded len bytes: %ld\n", total_len);
		pr_info(" recorded size bytes: %ld\n", total_size);
		if (total_lost) {
			pr_info(" With dropped events, record len and size may not match\n"
				" alloced and written from above\n");
		} else {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
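
/*
 * Illustrative sketch (not part of this file) of how a caller might use the
 * read-page interface above to drain one page of data from a single CPU.
 * The helper name and the process_page() consumer are examples only:
 *
 *	static int drain_one_page(struct trace_buffer *buffer, int cpu)
 *	{
 *		void *page;
 *		int ret;
 *
 *		page = ring_buffer_alloc_read_page(buffer, cpu);
 *		if (IS_ERR(page))
 *			return PTR_ERR(page);
 *
 *		ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *		if (ret >= 0)
 *			process_page(page, ret);
 *
 *		ring_buffer_free_read_page(buffer, cpu, page);
 *		return ret < 0 ? ret : 0;
 *	}
 */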