// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len : 5 bits\n");
	trace_seq_puts(s, "\ttime_delta : 27 bits\n");
	trace_seq_puts(s, "\tarray : 32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
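/*
 * Worked example of the compressed entry header above (illustrative
 * only): a data event with type_len == 3 and time_delta == 100 carries
 * 3 * RB_ALIGNMENT == 12 bytes of payload starting at array[0], and is
 * stamped 100 time units after the previous event. A data event with
 * type_len == 0 instead stores its payload length in array[0] and the
 * payload itself starts at array[1] (see rb_event_data() below).
 */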
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
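/*
 * Illustrative length calculation (assumes RB_EVNT_HDR_SIZE == 4, the
 * usual offset of array[] in struct ring_buffer_event; this is not
 * guaranteed on every configuration): with type_len == 5 the total
 * event length is 5 * RB_ALIGNMENT + RB_EVNT_HDR_SIZE = 20 + 4 = 24
 * bytes; with type_len == 0 and array[0] == 100 it is 100 + 4 = 104
 * bytes.
 */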
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself, except for a
 * TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
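/*
 * Worked example (illustrative only): a TIME_EXTEND or TIME_STAMP
 * event stores the upper bits of its value in array[0] and the lower
 * TS_SHIFT (27) bits in time_delta. With array[0] == 2 and
 * time_delta == 5, rb_event_time_stamp() reconstructs
 * (2 << 27) + 5 == 268435461.
 */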
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	long				wait_index;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};
/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI        = 1
 *  IRQ        = 2
 *  SOFTIRQ    = 3
 *  NORMAL     = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32
/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 bit still uses local64_t).
 *
 * For the ring buffer, the operations required on the 64 bit time
 * stamp are the following:
 *
 *  - A read may fail if it interrupts a modification of the time stamp.
 *      It will succeed if it did not interrupt another write, even if
 *      the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 *  (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half hold a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 *  top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}
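/*
 * Worked example of the split layout above (illustrative only): the
 * 60 bit value 0x3ffffffff splits into bottom == 0x3fffffff (the low
 * 30 bits) and top == 0xf (the next 30 bits). If the current update
 * counter is 2, each half is stored with (2 << RB_TIME_SHIFT) OR'd
 * into its two counter bits, and rb_time_val(top, bottom) masks the
 * counters back out to reassemble (0xfULL << 30) | 0x3fffffff.
 */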
static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret;

	ret = local_cmpxchg(l, expect, set);
	return ret == expect;
}

static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	u64 val;
	val = local64_cmpxchg(&t->time, expect, set);
	return val == expect;
}
#endif
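/*
 * Illustrative scenario for the 32 bit fallback above (a walk-through,
 * not a code path): writer A calls rb_time_cmpxchg() and observes
 * cnt == 4, so it computes cnt2 == 5. An interrupting event then calls
 * rb_time_set(), whose local_inc_return() bumps cnt to 5. When A now
 * tries rb_time_read_cmpxchg(&t->cnt, 4, 5), the counter no longer
 * holds 4, the cmpxchg fails, and A correctly reports failure instead
 * of tearing the top/bottom halves.
 */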
/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that was passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

static inline u64 rb_time_stamp(struct trace_buffer *buffer);
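/*
 * Worked example for rb_fix_abs_ts() (illustrative only): suppose the
 * previous full time stamp was save_ts = (3ULL << 59) | 100, so its
 * 5 MSBs hold 3, and the buffer stored the 59 bit value abs = 90.
 * OR-ing the saved MSBs back gives (3ULL << 59) | 90, which is less
 * than save_ts, so the clock must have carried into the MSBs since
 * the save; adding 1ULL << 59 yields the corrected time stamp
 * (4ULL << 59) | 90.
 */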
/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise the current time is returned, though really neither of
 * the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}
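/*
 * Worked example (illustrative only): if the writer has touched
 * pages_touched == 10 pages and the reader has consumed
 * pages_read == 3 of them, ring_buffer_nr_dirty_pages() reports
 * 7 dirty pages. Because the reader may also consume one final,
 * still-empty page, read may legitimately exceed cnt by exactly one,
 * in which case 0 is returned.
 */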
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on, or RING_BUFFER_ALL_CPUS
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			size_t nr_pages;
			size_t dirty;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			nr_pages = cpu_buffer->nr_pages;
			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (!pagebusy &&
			    (!nr_pages || (dirty * 100) > full * nr_pages))
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}
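/*
 * Worked example of the @full watermark check above (illustrative
 * only): with full == 50 (percent), nr_pages == 8 and dirty == 5,
 * the test (dirty * 100) > full * nr_pages compares 500 > 400, so
 * the waiter is woken; with dirty == 4 it would compare 400 > 400
 * and keep waiting.
 */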
/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we cannot trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 *  You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we cannot
 * trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
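/*
 * Worked example of the flag bits (illustrative addresses only):
 * buffer pages are cache line aligned, so if &page->list == 0x1000,
 * the previous page's next pointer is stored as 0x1001 while that
 * page is the head (RB_PAGE_HEAD set), or 0x1002 while a writer is
 * pushing the head forward (RB_PAGE_UPDATE). rb_list_head() masks
 * with ~RB_FLAG_MASK to recover the real 0x1000 pointer either way.
 */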
/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}
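/*
 * Illustrative walk-through of the helpers above (a sketch, not a
 * description of any one code path): a writer that needs to push the
 * head page forward first moves the pointer flag from HEAD to UPDATE
 * with rb_head_page_set_update(), does its work, and then publishes
 * the next page with rb_head_page_set_head(). If any of these
 * cmpxchg()s reports RB_PAGE_MOVED, a reader swapped the page out in
 * the meantime and the caller has to re-evaluate which page is the
 * head before retrying.
 */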
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			       long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * If a user thread allocates too much and si_mem_available()
	 * reports there's enough memory even though there is not,
	 * make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
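/*
 * Note on the kzalloc_node(ALIGN(..., cache_line_size()), ...) calls
 * above (illustrative): padding every struct buffer_page allocation
 * out to a cache line both avoids false sharing and keeps each page
 * at the start of a cache line, so the low RB_FLAG_MASK bits of its
 * address are zero. That is what allows the HEAD/UPDATE flags to be
 * stored in the list pointers (see the comment at struct buffer_page).
 */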
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
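/*
 * Minimal usage sketch (illustrative only; callers normally use the
 * ring_buffer_alloc() wrapper from <linux/ring_buffer.h>, which
 * supplies the lock_class_key):
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 *
 * The requested per-CPU size is rounded up to whole buffer pages,
 * with a minimum of two pages.
 */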
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
1960 * Make sure that we have head_bit value preserved for the 1961 * next page 1962 */ 1963 tail_page->next = (struct list_head *)((unsigned long)next_page | 1964 head_bit); 1965 next_page = rb_list_head(next_page); 1966 next_page->prev = tail_page; 1967 1968 /* make sure pages points to a valid page in the ring buffer */ 1969 cpu_buffer->pages = next_page; 1970 1971 /* update head page */ 1972 if (head_bit) 1973 cpu_buffer->head_page = list_entry(next_page, 1974 struct buffer_page, list); 1975 1976 /* 1977 * change read pointer to make sure any read iterators reset 1978 * themselves 1979 */ 1980 cpu_buffer->read = 0; 1981 1982 /* pages are removed, resume tracing and then free the pages */ 1983 atomic_dec(&cpu_buffer->record_disabled); 1984 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1985 1986 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1987 1988 /* last buffer page to remove */ 1989 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1990 list); 1991 tmp_iter_page = first_page; 1992 1993 do { 1994 cond_resched(); 1995 1996 to_remove_page = tmp_iter_page; 1997 rb_inc_page(&tmp_iter_page); 1998 1999 /* update the counters */ 2000 page_entries = rb_page_entries(to_remove_page); 2001 if (page_entries) { 2002 /* 2003 * If something was added to this page, it was full 2004 * since it is not the tail page. So we deduct the 2005 * bytes consumed in ring buffer from here. 2006 * Increment overrun to account for the lost events. 2007 */ 2008 local_add(page_entries, &cpu_buffer->overrun); 2009 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2010 } 2011 2012 /* 2013 * We have already removed references to this list item, just 2014 * free up the buffer_page and its page 2015 */ 2016 free_buffer_page(to_remove_page); 2017 nr_removed--; 2018 2019 } while (to_remove_page != last_page); 2020 2021 RB_WARN_ON(cpu_buffer, nr_removed); 2022 2023 return nr_removed == 0; 2024 } 2025 2026 static int 2027 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 2028 { 2029 struct list_head *pages = &cpu_buffer->new_pages; 2030 int retries, success; 2031 2032 raw_spin_lock_irq(&cpu_buffer->reader_lock); 2033 /* 2034 * We are holding the reader lock, so the reader page won't be swapped 2035 * in the ring buffer. Now we are racing with the writer trying to 2036 * move head page and the tail page. 2037 * We are going to adapt the reader page update process where: 2038 * 1. We first splice the start and end of list of new pages between 2039 * the head page and its previous page. 2040 * 2. We cmpxchg the prev_page->next to point from head page to the 2041 * start of new pages list. 2042 * 3. Finally, we update the head->prev to the end of new list. 2043 * 2044 * We will try this process 10 times, to make sure that we don't keep 2045 * spinning. 
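 *
 * Illustration of the splice (derived from the steps above), with
 * P = prev_page, H = the head page and N1..Nk the new pages:
 *
 *	before:	P --(HEAD)--> H --> ...
 *	after:	P --> N1 --> ... --> Nk --(HEAD)--> H --> ...
 *
 * last_page->next and first_page->prev are prepared up front, so the
 * list is consistent the instant the cmpxchg on P->next succeeds.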
2046 */
2047 retries = 10;
2048 success = 0;
2049 while (retries--) {
2050 struct list_head *head_page, *prev_page, *r;
2051 struct list_head *last_page, *first_page;
2052 struct list_head *head_page_with_bit;
2053
2054 head_page = &rb_set_head_page(cpu_buffer)->list;
2055 if (!head_page)
2056 break;
2057 prev_page = head_page->prev;
2058
2059 first_page = pages->next;
2060 last_page = pages->prev;
2061
2062 head_page_with_bit = (struct list_head *)
2063 ((unsigned long)head_page | RB_PAGE_HEAD);
2064
2065 last_page->next = head_page_with_bit;
2066 first_page->prev = prev_page;
2067
2068 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
2069
2070 if (r == head_page_with_bit) {
2071 /*
2072 * yay, we replaced the page pointer to our new list,
2073 * now, we just have to update the head page's prev
2074 * pointer to point to the end of the list
2075 */
2076 head_page->prev = last_page;
2077 success = 1;
2078 break;
2079 }
2080 }
2081
2082 if (success)
2083 INIT_LIST_HEAD(pages);
2084 /*
2085 * If we weren't successful in adding the new pages, warn and stop
2086 * tracing
2087 */
2088 RB_WARN_ON(cpu_buffer, !success);
2089 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
2090
2091 /* free pages if they weren't inserted */
2092 if (!success) {
2093 struct buffer_page *bpage, *tmp;
2094 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2095 list) {
2096 list_del_init(&bpage->list);
2097 free_buffer_page(bpage);
2098 }
2099 }
2100 return success;
2101 }
2102
2103 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2104 {
2105 int success;
2106
2107 if (cpu_buffer->nr_pages_to_update > 0)
2108 success = rb_insert_pages(cpu_buffer);
2109 else
2110 success = rb_remove_pages(cpu_buffer,
2111 -cpu_buffer->nr_pages_to_update);
2112
2113 if (success)
2114 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2115 }
2116
2117 static void update_pages_handler(struct work_struct *work)
2118 {
2119 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2120 struct ring_buffer_per_cpu, update_pages_work);
2121 rb_update_pages(cpu_buffer);
2122 complete(&cpu_buffer->update_done);
2123 }
2124
2125 /**
2126 * ring_buffer_resize - resize the ring buffer
2127 * @buffer: the buffer to resize.
2128 * @size: the new size.
2129 * @cpu_id: the cpu buffer to resize
2130 *
2131 * Minimum size is 2 * BUF_PAGE_SIZE.
2132 *
2133 * Returns 0 on success and < 0 on failure.
2134 */
2135 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2136 int cpu_id)
2137 {
2138 struct ring_buffer_per_cpu *cpu_buffer;
2139 unsigned long nr_pages;
2140 int cpu, err;
2141
2142 /*
2143 * Always succeed at resizing a non-existent buffer:
2144 */
2145 if (!buffer)
2146 return 0;
2147
2148 /* Make sure the requested buffer exists */
2149 if (cpu_id != RING_BUFFER_ALL_CPUS &&
2150 !cpumask_test_cpu(cpu_id, buffer->cpumask))
2151 return 0;
2152
2153 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
2154
2155 /* we need a minimum of two pages */
2156 if (nr_pages < 2)
2157 nr_pages = 2;
2158
2159 /* prevent another thread from changing buffer sizes */
2160 mutex_lock(&buffer->mutex);
2161
2162
2163 if (cpu_id == RING_BUFFER_ALL_CPUS) {
2164 /*
2165 * Don't succeed if resizing is disabled, as a reader might be
2166 * manipulating the ring buffer and is expecting a sane state while
2167 * this is true.
2168 */ 2169 for_each_buffer_cpu(buffer, cpu) { 2170 cpu_buffer = buffer->buffers[cpu]; 2171 if (atomic_read(&cpu_buffer->resize_disabled)) { 2172 err = -EBUSY; 2173 goto out_err_unlock; 2174 } 2175 } 2176 2177 /* calculate the pages to update */ 2178 for_each_buffer_cpu(buffer, cpu) { 2179 cpu_buffer = buffer->buffers[cpu]; 2180 2181 cpu_buffer->nr_pages_to_update = nr_pages - 2182 cpu_buffer->nr_pages; 2183 /* 2184 * nothing more to do for removing pages or no update 2185 */ 2186 if (cpu_buffer->nr_pages_to_update <= 0) 2187 continue; 2188 /* 2189 * to add pages, make sure all new pages can be 2190 * allocated without receiving ENOMEM 2191 */ 2192 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2193 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2194 &cpu_buffer->new_pages)) { 2195 /* not enough memory for new pages */ 2196 err = -ENOMEM; 2197 goto out_err; 2198 } 2199 } 2200 2201 cpus_read_lock(); 2202 /* 2203 * Fire off all the required work handlers 2204 * We can't schedule on offline CPUs, but it's not necessary 2205 * since we can change their buffer sizes without any race. 2206 */ 2207 for_each_buffer_cpu(buffer, cpu) { 2208 cpu_buffer = buffer->buffers[cpu]; 2209 if (!cpu_buffer->nr_pages_to_update) 2210 continue; 2211 2212 /* Can't run something on an offline CPU. */ 2213 if (!cpu_online(cpu)) { 2214 rb_update_pages(cpu_buffer); 2215 cpu_buffer->nr_pages_to_update = 0; 2216 } else { 2217 schedule_work_on(cpu, 2218 &cpu_buffer->update_pages_work); 2219 } 2220 } 2221 2222 /* wait for all the updates to complete */ 2223 for_each_buffer_cpu(buffer, cpu) { 2224 cpu_buffer = buffer->buffers[cpu]; 2225 if (!cpu_buffer->nr_pages_to_update) 2226 continue; 2227 2228 if (cpu_online(cpu)) 2229 wait_for_completion(&cpu_buffer->update_done); 2230 cpu_buffer->nr_pages_to_update = 0; 2231 } 2232 2233 cpus_read_unlock(); 2234 } else { 2235 cpu_buffer = buffer->buffers[cpu_id]; 2236 2237 if (nr_pages == cpu_buffer->nr_pages) 2238 goto out; 2239 2240 /* 2241 * Don't succeed if resizing is disabled, as a reader might be 2242 * manipulating the ring buffer and is expecting a sane state while 2243 * this is true. 2244 */ 2245 if (atomic_read(&cpu_buffer->resize_disabled)) { 2246 err = -EBUSY; 2247 goto out_err_unlock; 2248 } 2249 2250 cpu_buffer->nr_pages_to_update = nr_pages - 2251 cpu_buffer->nr_pages; 2252 2253 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2254 if (cpu_buffer->nr_pages_to_update > 0 && 2255 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2256 &cpu_buffer->new_pages)) { 2257 err = -ENOMEM; 2258 goto out_err; 2259 } 2260 2261 cpus_read_lock(); 2262 2263 /* Can't run something on an offline CPU. */ 2264 if (!cpu_online(cpu_id)) 2265 rb_update_pages(cpu_buffer); 2266 else { 2267 schedule_work_on(cpu_id, 2268 &cpu_buffer->update_pages_work); 2269 wait_for_completion(&cpu_buffer->update_done); 2270 } 2271 2272 cpu_buffer->nr_pages_to_update = 0; 2273 cpus_read_unlock(); 2274 } 2275 2276 out: 2277 /* 2278 * The ring buffer resize can happen with the ring buffer 2279 * enabled, so that the update disturbs the tracing as little 2280 * as possible. But if the buffer is disabled, we do not need 2281 * to worry about that, and we can take the time to verify 2282 * that the buffer is not corrupt. 2283 */ 2284 if (atomic_read(&buffer->record_disabled)) { 2285 atomic_inc(&buffer->record_disabled); 2286 /* 2287 * Even though the buffer was disabled, we must make sure 2288 * that it is truly disabled before calling rb_check_pages. 
2289 * There could have been a race between checking 2290 * record_disable and incrementing it. 2291 */ 2292 synchronize_rcu(); 2293 for_each_buffer_cpu(buffer, cpu) { 2294 cpu_buffer = buffer->buffers[cpu]; 2295 rb_check_pages(cpu_buffer); 2296 } 2297 atomic_dec(&buffer->record_disabled); 2298 } 2299 2300 mutex_unlock(&buffer->mutex); 2301 return 0; 2302 2303 out_err: 2304 for_each_buffer_cpu(buffer, cpu) { 2305 struct buffer_page *bpage, *tmp; 2306 2307 cpu_buffer = buffer->buffers[cpu]; 2308 cpu_buffer->nr_pages_to_update = 0; 2309 2310 if (list_empty(&cpu_buffer->new_pages)) 2311 continue; 2312 2313 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2314 list) { 2315 list_del_init(&bpage->list); 2316 free_buffer_page(bpage); 2317 } 2318 } 2319 out_err_unlock: 2320 mutex_unlock(&buffer->mutex); 2321 return err; 2322 } 2323 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2324 2325 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2326 { 2327 mutex_lock(&buffer->mutex); 2328 if (val) 2329 buffer->flags |= RB_FL_OVERWRITE; 2330 else 2331 buffer->flags &= ~RB_FL_OVERWRITE; 2332 mutex_unlock(&buffer->mutex); 2333 } 2334 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2335 2336 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2337 { 2338 return bpage->page->data + index; 2339 } 2340 2341 static __always_inline struct ring_buffer_event * 2342 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2343 { 2344 return __rb_page_index(cpu_buffer->reader_page, 2345 cpu_buffer->reader_page->read); 2346 } 2347 2348 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2349 { 2350 return local_read(&bpage->page->commit); 2351 } 2352 2353 static struct ring_buffer_event * 2354 rb_iter_head_event(struct ring_buffer_iter *iter) 2355 { 2356 struct ring_buffer_event *event; 2357 struct buffer_page *iter_head_page = iter->head_page; 2358 unsigned long commit; 2359 unsigned length; 2360 2361 if (iter->head != iter->next_event) 2362 return iter->event; 2363 2364 /* 2365 * When the writer goes across pages, it issues a cmpxchg which 2366 * is a mb(), which will synchronize with the rmb here. 2367 * (see rb_tail_page_update() and __rb_reserve_next()) 2368 */ 2369 commit = rb_page_commit(iter_head_page); 2370 smp_rmb(); 2371 event = __rb_page_index(iter_head_page, iter->head); 2372 length = rb_event_length(event); 2373 2374 /* 2375 * READ_ONCE() doesn't work on functions and we don't want the 2376 * compiler doing any crazy optimizations with length. 2377 */ 2378 barrier(); 2379 2380 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2381 /* Writer corrupted the read? */ 2382 goto reset; 2383 2384 memcpy(iter->event, event, length); 2385 /* 2386 * If the page stamp is still the same after this rmb() then the 2387 * event was safely copied without the writer entering the page. 
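 *
 * Informally, the pairing being checked here is:
 *
 *	writer:	write event data ... update commit/time_stamp
 *	reader:	read commit, smp_rmb(), copy event, smp_rmb(),
 *		re-check time_stamp and commit below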
2388 */
2389 smp_rmb();
2390
2391 /* Make sure the page didn't change since we read this */
2392 if (iter->page_stamp != iter_head_page->page->time_stamp ||
2393 commit > rb_page_commit(iter_head_page))
2394 goto reset;
2395
2396 iter->next_event = iter->head + length;
2397 return iter->event;
2398 reset:
2399 /* Reset to the beginning */
2400 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2401 iter->head = 0;
2402 iter->next_event = 0;
2403 iter->missed_events = 1;
2404 return NULL;
2405 }
2406
2407 /* Size is determined by what has been committed */
2408 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2409 {
2410 return rb_page_commit(bpage);
2411 }
2412
2413 static __always_inline unsigned
2414 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2415 {
2416 return rb_page_commit(cpu_buffer->commit_page);
2417 }
2418
2419 static __always_inline unsigned
2420 rb_event_index(struct ring_buffer_event *event)
2421 {
2422 unsigned long addr = (unsigned long)event;
2423
2424 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
2425 }
2426
2427 static void rb_inc_iter(struct ring_buffer_iter *iter)
2428 {
2429 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2430
2431 /*
2432 * The iterator could be on the reader page (it starts there).
2433 * But the head could have moved, since the reader was
2434 * found. Check for this case and assign the iterator
2435 * to the head page instead of next.
2436 */
2437 if (iter->head_page == cpu_buffer->reader_page)
2438 iter->head_page = rb_set_head_page(cpu_buffer);
2439 else
2440 rb_inc_page(&iter->head_page);
2441
2442 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2443 iter->head = 0;
2444 iter->next_event = 0;
2445 }
2446
2447 /*
2448 * rb_handle_head_page - writer hit the head page
2449 *
2450 * Returns: +1 to retry page
2451 * 0 to continue
2452 * -1 on error
2453 */
2454 static int
2455 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2456 struct buffer_page *tail_page,
2457 struct buffer_page *next_page)
2458 {
2459 struct buffer_page *new_head;
2460 int entries;
2461 int type;
2462 int ret;
2463
2464 entries = rb_page_entries(next_page);
2465
2466 /*
2467 * The hard part is here. We need to move the head
2468 * forward, and protect against both readers on
2469 * other CPUs and writers coming in via interrupts.
2470 */
2471 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2472 RB_PAGE_HEAD);
2473
2474 /*
2475 * type can be one of four:
2476 * NORMAL - an interrupt already moved it for us
2477 * HEAD - we are the first to get here.
2478 * UPDATE - we are the interrupt interrupting
2479 * a current move.
2480 * MOVED - a reader on another CPU moved the next
2481 * pointer to its reader page. Give up
2482 * and try again.
2483 */
2484
2485 switch (type) {
2486 case RB_PAGE_HEAD:
2487 /*
2488 * We changed the head to UPDATE, thus
2489 * it is our responsibility to update
2490 * the counters.
2491 */
2492 local_add(entries, &cpu_buffer->overrun);
2493 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2494
2495 /*
2496 * The entries will be zeroed out when we move the
2497 * tail page.
2498 */
2499
2500 /* still more to do */
2501 break;
2502
2503 case RB_PAGE_UPDATE:
2504 /*
2505 * This is an interrupt that interrupted the
2506 * previous update. Still more to do.
2507 */
2508 break;
2509 case RB_PAGE_NORMAL:
2510 /*
2511 * An interrupt came in before the update
2512 * and processed this for us.
2513 * Nothing left to do.
2514 */
2515 return 1;
2516 case RB_PAGE_MOVED:
2517 /*
2518 * The reader is on another CPU and just did
2519 * a swap with our next_page.
2520 * Try again.
2521 */
2522 return 1;
2523 default:
2524 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2525 return -1;
2526 }
2527
2528 /*
2529 * Now that we are here, the old head pointer is
2530 * set to UPDATE. This will keep the reader from
2531 * swapping the head page with the reader page.
2532 * The reader (on another CPU) will spin till
2533 * we are finished.
2534 *
2535 * We just need to protect against interrupts
2536 * doing the job. We will set the next pointer
2537 * to HEAD. After that, we set the old pointer
2538 * to NORMAL, but only if it was HEAD before;
2539 * otherwise we are an interrupt, and only
2540 * want the outermost commit to reset it.
2541 */
2542 new_head = next_page;
2543 rb_inc_page(&new_head);
2544
2545 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2546 RB_PAGE_NORMAL);
2547
2548 /*
2549 * Valid returns are:
2550 * HEAD - an interrupt came in and already set it.
2551 * NORMAL - One of two things:
2552 * 1) We really set it.
2553 * 2) A bunch of interrupts came in and moved
2554 * the page forward again.
2555 */
2556 switch (ret) {
2557 case RB_PAGE_HEAD:
2558 case RB_PAGE_NORMAL:
2559 /* OK */
2560 break;
2561 default:
2562 RB_WARN_ON(cpu_buffer, 1);
2563 return -1;
2564 }
2565
2566 /*
2567 * It is possible that an interrupt came in,
2568 * set the head up, then more interrupts came in
2569 * and moved it again. When we get back here,
2570 * the page would have been set to NORMAL but we
2571 * just set it back to HEAD.
2572 *
2573 * How do you detect this? Well, if that happened
2574 * the tail page would have moved.
2575 */
2576 if (ret == RB_PAGE_NORMAL) {
2577 struct buffer_page *buffer_tail_page;
2578
2579 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2580 /*
2581 * If the tail had moved past next, then we need
2582 * to reset the pointer.
2583 */
2584 if (buffer_tail_page != tail_page &&
2585 buffer_tail_page != next_page)
2586 rb_head_page_set_normal(cpu_buffer, new_head,
2587 next_page,
2588 RB_PAGE_HEAD);
2589 }
2590
2591 /*
2592 * If this was the outermost commit (the one that
2593 * changed the original pointer from HEAD to UPDATE),
2594 * then it is up to us to reset it to NORMAL.
2595 */
2596 if (type == RB_PAGE_HEAD) {
2597 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2598 tail_page,
2599 RB_PAGE_UPDATE);
2600 if (RB_WARN_ON(cpu_buffer,
2601 ret != RB_PAGE_UPDATE))
2602 return -1;
2603 }
2604
2605 return 0;
2606 }
2607
2608 static inline void
2609 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2610 unsigned long tail, struct rb_event_info *info)
2611 {
2612 struct buffer_page *tail_page = info->tail_page;
2613 struct ring_buffer_event *event;
2614 unsigned long length = info->length;
2615
2616 /*
2617 * Only the event that crossed the page boundary
2618 * must fill the old tail_page with padding.
2619 */
2620 if (tail >= BUF_PAGE_SIZE) {
2621 /*
2622 * If the page was filled, then we still need
2623 * to update the real_end. Reset it to zero
2624 * and the reader will ignore it.
2625 */
2626 if (tail == BUF_PAGE_SIZE)
2627 tail_page->real_end = 0;
2628
2629 local_sub(length, &tail_page->write);
2630 return;
2631 }
2632
2633 event = __rb_page_index(tail_page, tail);
2634
2635 /* account for padding bytes */
2636 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2637
2638 /*
2639 * Save the original length to the meta data.
2640 * This will be used by the reader to update the
2641 * lost event counter.
2642 */
2643 tail_page->real_end = tail;
2644
2645 /*
2646 * If this event is bigger than the minimum size, then
2647 * we need to be careful that we don't subtract the
2648 * write counter enough to allow another writer to slip
2649 * in on this page.
2650 * We put in a discarded commit instead, to make sure
2651 * that this space is not used again.
2652 *
2653 * If we are less than the minimum size, we don't need to
2654 * worry about it.
2655 */
2656 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2657 /* No room for any events */
2658
2659 /* Mark the rest of the page with padding */
2660 rb_event_set_padding(event);
2661
2662 /* Make sure the padding is visible before the write update */
2663 smp_wmb();
2664
2665 /* Set the write back to the previous setting */
2666 local_sub(length, &tail_page->write);
2667 return;
2668 }
2669
2670 /* Put in a discarded event */
2671 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2672 event->type_len = RINGBUF_TYPE_PADDING;
2673 /* time delta must be non zero */
2674 event->time_delta = 1;
2675
2676 /* Make sure the padding is visible before the tail_page->write update */
2677 smp_wmb();
2678
2679 /* Set write to end of buffer */
2680 length = (tail + length) - BUF_PAGE_SIZE;
2681 local_sub(length, &tail_page->write);
2682 }
2683
2684 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2685
2686 /*
2687 * This is the slow path, force gcc not to inline it.
2688 */
2689 static noinline struct ring_buffer_event *
2690 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2691 unsigned long tail, struct rb_event_info *info)
2692 {
2693 struct buffer_page *tail_page = info->tail_page;
2694 struct buffer_page *commit_page = cpu_buffer->commit_page;
2695 struct trace_buffer *buffer = cpu_buffer->buffer;
2696 struct buffer_page *next_page;
2697 int ret;
2698
2699 next_page = tail_page;
2700
2701 rb_inc_page(&next_page);
2702
2703 /*
2704 * If for some reason, we had an interrupt storm that made
2705 * it all the way around the buffer, bail, and warn
2706 * about it.
2707 */
2708 if (unlikely(next_page == commit_page)) {
2709 local_inc(&cpu_buffer->commit_overrun);
2710 goto out_reset;
2711 }
2712
2713 /*
2714 * This is where the fun begins!
2715 *
2716 * We are fighting against races between a reader that
2717 * could be on another CPU trying to swap its reader
2718 * page with the buffer head.
2719 *
2720 * We are also fighting against interrupts coming in and
2721 * moving the head or tail on us as well.
2722 *
2723 * If the next page is the head page then we have filled
2724 * the buffer, unless the commit page is still on the
2725 * reader page.
2726 */
2727 if (rb_is_head_page(next_page, &tail_page->list)) {
2728
2729 /*
2730 * If the commit is not on the reader page, then
2731 * move the head page.
2732 */
2733 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2734 /*
2735 * If we are not in overwrite mode,
2736 * this is easy, just stop here.
2737 */
2738 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2739 local_inc(&cpu_buffer->dropped_events);
2740 goto out_reset;
2741 }
2742
2743 ret = rb_handle_head_page(cpu_buffer,
2744 tail_page,
2745 next_page);
2746 if (ret < 0)
2747 goto out_reset;
2748 if (ret)
2749 goto out_again;
2750 } else {
2751 /*
2752 * We need to be careful here too. The
2753 * commit page could still be on the reader
2754 * page. We could have a small buffer, and
2755 * have filled up the buffer with events
2756 * from interrupts and such, and wrapped.
2757 *
2758 * Note, if the tail page is also on the
2759 * reader_page, we let it move out.
2760 */
2761 if (unlikely((cpu_buffer->commit_page !=
2762 cpu_buffer->tail_page) &&
2763 (cpu_buffer->commit_page ==
2764 cpu_buffer->reader_page))) {
2765 local_inc(&cpu_buffer->commit_overrun);
2766 goto out_reset;
2767 }
2768 }
2769 }
2770
2771 rb_tail_page_update(cpu_buffer, tail_page, next_page);
2772
2773 out_again:
2774
2775 rb_reset_tail(cpu_buffer, tail, info);
2776
2777 /* Commit what we have for now. */
2778 rb_end_commit(cpu_buffer);
2779 /* rb_end_commit() decs committing */
2780 local_inc(&cpu_buffer->committing);
2781
2782 /* fail and let the caller try again */
2783 return ERR_PTR(-EAGAIN);
2784
2785 out_reset:
2786 /* reset write */
2787 rb_reset_tail(cpu_buffer, tail, info);
2788
2789 return NULL;
2790 }
2791
2792 /* Slow path */
2793 static struct ring_buffer_event *
2794 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2795 {
2796 if (abs)
2797 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2798 else
2799 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2800
2801 /* Not the first event on the page, or not delta? */
2802 if (abs || rb_event_index(event)) {
2803 event->time_delta = delta & TS_MASK;
2804 event->array[0] = delta >> TS_SHIFT;
2805 } else {
2806 /* nope, just zero it */
2807 event->time_delta = 0;
2808 event->array[0] = 0;
2809 }
2810
2811 return skip_time_extend(event);
2812 }
2813
2814 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2815 static inline bool sched_clock_stable(void)
2816 {
2817 return true;
2818 }
2819 #endif
2820
2821 static void
2822 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2823 struct rb_event_info *info)
2824 {
2825 u64 write_stamp;
2826
2827 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2828 (unsigned long long)info->delta,
2829 (unsigned long long)info->ts,
2830 (unsigned long long)info->before,
2831 (unsigned long long)info->after,
2832 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2833 sched_clock_stable() ? "" :
2834 "If you just came from a suspend/resume,\n"
2835 "please switch to the trace global clock:\n"
2836 " echo global > /sys/kernel/debug/tracing/trace_clock\n"
2837 "or add trace_clock=global to the kernel command line\n");
2838 }
2839
2840 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2841 struct ring_buffer_event **event,
2842 struct rb_event_info *info,
2843 u64 *delta,
2844 unsigned int *length)
2845 {
2846 bool abs = info->add_timestamp &
2847 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2848
2849 if (unlikely(info->delta > (1ULL << 59))) {
2850 /*
2851 * Some timers can use more than 59 bits, and when a timestamp
2852 * is added to the buffer, it will lose those bits.
2853 */
2854 if (abs && (info->ts & TS_MSB)) {
2855 info->delta &= ABS_TS_MASK;
2856
2857 /* did the clock go backwards? */
2858 } else if (info->before == info->after && info->before > info->ts) {
2859 /* not interrupted */
2860 static int once;
2861
2862 /*
2863 * This is possible with a recalibration of the TSC.
2864 * Do not produce a call stack, but just report it.
2865 */
2866 if (!once) {
2867 once++;
2868 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2869 info->before, info->ts);
2870 }
2871 } else
2872 rb_check_timestamp(cpu_buffer, info);
2873 if (!abs)
2874 info->delta = 0;
2875 }
2876 *event = rb_add_time_stamp(*event, info->delta, abs);
2877 *length -= RB_LEN_TIME_EXTEND;
2878 *delta = 0;
2879 }
2880
2881 /**
2882 * rb_update_event - update event type and data
2883 * @cpu_buffer: The per cpu buffer of the @event
2884 * @event: the event to update
2885 * @info: The info to update the @event with (contains length and delta)
2886 *
2887 * Update the type and data fields of the @event. The length
2888 * is the actual size that is written to the ring buffer,
2889 * and with this, we can determine what to place into the
2890 * data field.
2891 */
2892 static void
2893 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2894 struct ring_buffer_event *event,
2895 struct rb_event_info *info)
2896 {
2897 unsigned length = info->length;
2898 u64 delta = info->delta;
2899 unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2900
2901 if (!WARN_ON_ONCE(nest >= MAX_NEST))
2902 cpu_buffer->event_stamp[nest] = info->ts;
2903
2904 /*
2905 * If we need to add a timestamp, then we
2906 * add it to the start of the reserved space.
2907 */
2908 if (unlikely(info->add_timestamp))
2909 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2910
2911 event->time_delta = delta;
2912 length -= RB_EVNT_HDR_SIZE;
2913 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2914 event->type_len = 0;
2915 event->array[0] = length;
2916 } else
2917 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2918 }
2919
2920 static unsigned rb_calculate_event_length(unsigned length)
2921 {
2922 struct ring_buffer_event event; /* Used only for sizeof array */
2923
2924 /* zero length can cause confusion */
2925 if (!length)
2926 length++;
2927
2928 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2929 length += sizeof(event.array[0]);
2930
2931 length += RB_EVNT_HDR_SIZE;
2932 length = ALIGN(length, RB_ARCH_ALIGNMENT);
2933
2934 /*
2935 * In case the time delta is larger than the 27 bits for it
2936 * in the header, we need to add a timestamp. If another
2937 * event comes in when trying to discard this one to increase
2938 * the length, then the timestamp will be added in the allocated
2939 * space of this event. If length is bigger than the size needed
2940 * for the TIME_EXTEND, then padding has to be used. The event's
2941 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2942 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2943 * As length is a multiple of 4, we only need to worry if it
2944 * is 12 (RB_LEN_TIME_EXTEND + 4).
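 *
 * Worked example (on an arch without forced 8-byte alignment): a
 * 7 byte payload becomes 7 + RB_EVNT_HDR_SIZE = 11, aligned up to 12,
 * which hits this case and is therefore padded to 16.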
2945 */
2946 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2947 length += RB_ALIGNMENT;
2948
2949 return length;
2950 }
2951
2952 static u64 rb_time_delta(struct ring_buffer_event *event)
2953 {
2954 switch (event->type_len) {
2955 case RINGBUF_TYPE_PADDING:
2956 return 0;
2957
2958 case RINGBUF_TYPE_TIME_EXTEND:
2959 return rb_event_time_stamp(event);
2960
2961 case RINGBUF_TYPE_TIME_STAMP:
2962 return 0;
2963
2964 case RINGBUF_TYPE_DATA:
2965 return event->time_delta;
2966 default:
2967 return 0;
2968 }
2969 }
2970
2971 static inline int
2972 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2973 struct ring_buffer_event *event)
2974 {
2975 unsigned long new_index, old_index;
2976 struct buffer_page *bpage;
2977 unsigned long index;
2978 unsigned long addr;
2979 u64 write_stamp;
2980 u64 delta;
2981
2982 new_index = rb_event_index(event);
2983 old_index = new_index + rb_event_ts_length(event);
2984 addr = (unsigned long)event;
2985 addr &= PAGE_MASK;
2986
2987 bpage = READ_ONCE(cpu_buffer->tail_page);
2988
2989 delta = rb_time_delta(event);
2990
2991 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
2992 return 0;
2993
2994 /* Make sure the write stamp is read before testing the location */
2995 barrier();
2996
2997 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2998 unsigned long write_mask =
2999 local_read(&bpage->write) & ~RB_WRITE_MASK;
3000 unsigned long event_length = rb_event_length(event);
3001
3002 /* Something came in, can't discard */
3003 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
3004 write_stamp, write_stamp - delta))
3005 return 0;
3006
3007 /*
3008 * It's possible that the event time delta is zero
3009 * (has the same time stamp as the previous event)
3010 * in which case write_stamp and before_stamp could
3011 * be the same. In such a case, force before_stamp
3012 * to be different than write_stamp. It doesn't
3013 * matter what it is, as long as it's different.
3014 */
3015 if (!delta)
3016 rb_time_set(&cpu_buffer->before_stamp, 0);
3017
3018 /*
3019 * If an event were to come in now, it would see that the
3020 * write_stamp and the before_stamp are different, and assume
3021 * that this event just added itself before updating
3022 * the write stamp. The interrupting event will fix the
3023 * write stamp for us, and use the before stamp as its delta.
3024 */
3025
3026 /*
3027 * This is on the tail page. It is possible that
3028 * a write could come in and move the tail page
3029 * and write to the next page. That is fine
3030 * because we just shorten what is on this page.
3031 */
3032 old_index += write_mask;
3033 new_index += write_mask;
3034 index = local_cmpxchg(&bpage->write, old_index, new_index);
3035 if (index == old_index) {
3036 /* update counters */
3037 local_sub(event_length, &cpu_buffer->entries_bytes);
3038 return 1;
3039 }
3040 }
3041
3042 /* could not discard */
3043 return 0;
3044 }
3045
3046 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3047 {
3048 local_inc(&cpu_buffer->committing);
3049 local_inc(&cpu_buffer->commits);
3050 }
3051
3052 static __always_inline void
3053 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3054 {
3055 unsigned long max_count;
3056
3057 /*
3058 * We only race with interrupts and NMIs on this CPU.
3059 * If we own the commit event, then we can commit
3060 * all others that interrupted us, since the interruptions
3061 * are in stack format (they finish before they come
3062 * back to us).
This allows us to do a simple loop to 3063 * assign the commit to the tail. 3064 */ 3065 again: 3066 max_count = cpu_buffer->nr_pages * 100; 3067 3068 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3069 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3070 return; 3071 if (RB_WARN_ON(cpu_buffer, 3072 rb_is_reader_page(cpu_buffer->tail_page))) 3073 return; 3074 local_set(&cpu_buffer->commit_page->page->commit, 3075 rb_page_write(cpu_buffer->commit_page)); 3076 rb_inc_page(&cpu_buffer->commit_page); 3077 /* add barrier to keep gcc from optimizing too much */ 3078 barrier(); 3079 } 3080 while (rb_commit_index(cpu_buffer) != 3081 rb_page_write(cpu_buffer->commit_page)) { 3082 3083 local_set(&cpu_buffer->commit_page->page->commit, 3084 rb_page_write(cpu_buffer->commit_page)); 3085 RB_WARN_ON(cpu_buffer, 3086 local_read(&cpu_buffer->commit_page->page->commit) & 3087 ~RB_WRITE_MASK); 3088 barrier(); 3089 } 3090 3091 /* again, keep gcc from optimizing */ 3092 barrier(); 3093 3094 /* 3095 * If an interrupt came in just after the first while loop 3096 * and pushed the tail page forward, we will be left with 3097 * a dangling commit that will never go forward. 3098 */ 3099 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3100 goto again; 3101 } 3102 3103 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3104 { 3105 unsigned long commits; 3106 3107 if (RB_WARN_ON(cpu_buffer, 3108 !local_read(&cpu_buffer->committing))) 3109 return; 3110 3111 again: 3112 commits = local_read(&cpu_buffer->commits); 3113 /* synchronize with interrupts */ 3114 barrier(); 3115 if (local_read(&cpu_buffer->committing) == 1) 3116 rb_set_commit_to_write(cpu_buffer); 3117 3118 local_dec(&cpu_buffer->committing); 3119 3120 /* synchronize with interrupts */ 3121 barrier(); 3122 3123 /* 3124 * Need to account for interrupts coming in between the 3125 * updating of the commit page and the clearing of the 3126 * committing counter. 
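 *
 * Example of the race handled below: an interrupt fires after
 * rb_set_commit_to_write() has run but before 'committing' is
 * decremented; the interrupt's commit bumps 'commits', so the
 * re-check sees a change and loops to pick up the dangling commit.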
3127 */
3128 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3129 !local_read(&cpu_buffer->committing)) {
3130 local_inc(&cpu_buffer->committing);
3131 goto again;
3132 }
3133 }
3134
3135 static inline void rb_event_discard(struct ring_buffer_event *event)
3136 {
3137 if (extended_time(event))
3138 event = skip_time_extend(event);
3139
3140 /* array[0] holds the actual length for the discarded event */
3141 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3142 event->type_len = RINGBUF_TYPE_PADDING;
3143 /* time delta must be non zero */
3144 if (!event->time_delta)
3145 event->time_delta = 1;
3146 }
3147
3148 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
3149 struct ring_buffer_event *event)
3150 {
3151 local_inc(&cpu_buffer->entries);
3152 rb_end_commit(cpu_buffer);
3153 }
3154
3155 static __always_inline void
3156 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3157 {
3158 size_t nr_pages;
3159 size_t dirty;
3160 size_t full;
3161
3162 if (buffer->irq_work.waiters_pending) {
3163 buffer->irq_work.waiters_pending = false;
3164 /* irq_work_queue() supplies its own memory barriers */
3165 irq_work_queue(&buffer->irq_work.work);
3166 }
3167
3168 if (cpu_buffer->irq_work.waiters_pending) {
3169 cpu_buffer->irq_work.waiters_pending = false;
3170 /* irq_work_queue() supplies its own memory barriers */
3171 irq_work_queue(&cpu_buffer->irq_work.work);
3172 }
3173
3174 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3175 return;
3176
3177 if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3178 return;
3179
3180 if (!cpu_buffer->irq_work.full_waiters_pending)
3181 return;
3182
3183 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3184
3185 full = cpu_buffer->shortest_full;
3186 nr_pages = cpu_buffer->nr_pages;
3187 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
3188 if (full && nr_pages && (dirty * 100) <= full * nr_pages)
3189 return;
3190
3191 cpu_buffer->irq_work.wakeup_full = true;
3192 cpu_buffer->irq_work.full_waiters_pending = false;
3193 /* irq_work_queue() supplies its own memory barriers */
3194 irq_work_queue(&cpu_buffer->irq_work.work);
3195 }
3196
3197 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3198 # define do_ring_buffer_record_recursion() \
3199 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3200 #else
3201 # define do_ring_buffer_record_recursion() do { } while (0)
3202 #endif
3203
3204 /*
3205 * The lock and unlock are done within a preempt disable section.
3206 * The current_context per_cpu variable can only be modified
3207 * by the current task between lock and unlock. But it can
3208 * be modified more than once via an interrupt. To pass this
3209 * information from the lock to the unlock without having to
3210 * access the 'in_interrupt()' functions again (which do show
3211 * a bit of overhead in something as critical as function tracing),
3212 * we use a bitmask trick.
3213 *
3214 * bit 1 = NMI context
3215 * bit 2 = IRQ context
3216 * bit 3 = SoftIRQ context
3217 * bit 4 = normal context.
3218 *
3219 * This works because this is the order of contexts that can
3220 * preempt other contexts. A SoftIRQ never preempts an IRQ
3221 * context.
3222 *
3223 * When the context is determined, the corresponding bit is
3224 * checked and set (if it was set, then a recursion of that context
3225 * happened).
3226 *
3227 * On unlock, we need to clear this bit. To do so, just subtract
3228 * 1 from the current_context and AND it to itself.
3229 *
3230 * (binary)
3231 * 101 - 1 = 100
3232 * 101 & 100 = 100 (clearing bit zero)
3233 *
3234 * 1010 - 1 = 1001
3235 * 1010 & 1001 = 1000 (clearing bit 1)
3236 *
3237 * The least significant bit can be cleared this way, and it
3238 * just so happens that it is the same bit corresponding to
3239 * the current context.
3240 *
3241 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3242 * is set when a recursion is detected at the current context, and if
3243 * the TRANSITION bit is already set, it will fail the recursion.
3244 * This is needed because there's a lag between the changing of
3245 * interrupt context and updating the preempt count. In this case,
3246 * a false positive will be found. To handle this, one extra recursion
3247 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3248 * bit is already set, then it is considered a recursion and the function
3249 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3250 *
3251 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3252 * to be cleared, even if it wasn't the context that set it. That is,
3253 * if an interrupt comes in while the NORMAL bit is set and the ring buffer
3254 * is called before preempt_count() is updated, since the check will
3255 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3256 * NMI then comes in, it will set the NMI bit, but when the NMI code
3257 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3258 * and leave the NMI bit set. But this is fine, because the interrupt
3259 * code that set the TRANSITION bit will then clear the NMI bit when it
3260 * calls trace_recursive_unlock(). If another NMI comes in, it will
3261 * set the TRANSITION bit and continue.
3262 *
3263 * Note: The TRANSITION bit only handles a single transition between contexts.
3264 */
3265
3266 static __always_inline int
3267 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3268 {
3269 unsigned int val = cpu_buffer->current_context;
3270 int bit = interrupt_context_level();
3271
3272 bit = RB_CTX_NORMAL - bit;
3273
3274 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3275 /*
3276 * It is possible that this was called by transitioning
3277 * between interrupt context, and preempt_count() has not
3278 * been updated yet. In this case, use the TRANSITION bit.
3279 */
3280 bit = RB_CTX_TRANSITION;
3281 if (val & (1 << (bit + cpu_buffer->nest))) {
3282 do_ring_buffer_record_recursion();
3283 return 1;
3284 }
3285 }
3286
3287 val |= (1 << (bit + cpu_buffer->nest));
3288 cpu_buffer->current_context = val;
3289
3290 return 0;
3291 }
3292
3293 static __always_inline void
3294 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3295 {
3296 cpu_buffer->current_context &=
3297 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3298 }
3299
3300 /* The recursive locking above uses 5 bits */
3301 #define NESTED_BITS 5
3302
3303 /**
3304 * ring_buffer_nest_start - Allow tracing while nested
3305 * @buffer: The ring buffer to modify
3306 *
3307 * The ring buffer has a safety mechanism to prevent recursion.
3308 * But there may be a case where a trace needs to be done while
3309 * tracing something else. In this case, calling this function
3310 * will allow another ring_buffer_lock_reserve() to nest within a
3311 * currently active one.
3312 *
3313 * Call this function before calling another ring_buffer_lock_reserve() and
3314 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
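 *
 * A minimal sketch of the expected call pattern (illustrative only):
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, length);
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), data, length);
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 *	ring_buffer_nest_end(buffer);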
3315 */
3316 void ring_buffer_nest_start(struct trace_buffer *buffer)
3317 {
3318 struct ring_buffer_per_cpu *cpu_buffer;
3319 int cpu;
3320
3321 /* Enabled by ring_buffer_nest_end() */
3322 preempt_disable_notrace();
3323 cpu = raw_smp_processor_id();
3324 cpu_buffer = buffer->buffers[cpu];
3325 /* This is the shift value for the above recursive locking */
3326 cpu_buffer->nest += NESTED_BITS;
3327 }
3328
3329 /**
3330 * ring_buffer_nest_end - End the nested tracing allowed by ring_buffer_nest_start()
3331 * @buffer: The ring buffer to modify
3332 *
3333 * Must be called after ring_buffer_nest_start() and after the
3334 * ring_buffer_unlock_commit().
3335 */
3336 void ring_buffer_nest_end(struct trace_buffer *buffer)
3337 {
3338 struct ring_buffer_per_cpu *cpu_buffer;
3339 int cpu;
3340
3341 /* disabled by ring_buffer_nest_start() */
3342 cpu = raw_smp_processor_id();
3343 cpu_buffer = buffer->buffers[cpu];
3344 /* This is the shift value for the above recursive locking */
3345 cpu_buffer->nest -= NESTED_BITS;
3346 preempt_enable_notrace();
3347 }
3348
3349 /**
3350 * ring_buffer_unlock_commit - commit a reserved event
3351 * @buffer: The buffer to commit to
3352 * @event: The event pointer to commit.
3353 *
3354 * This commits the data to the ring buffer, and releases any locks held.
3355 *
3356 * Must be paired with ring_buffer_lock_reserve.
3357 */
3358 int ring_buffer_unlock_commit(struct trace_buffer *buffer,
3359 struct ring_buffer_event *event)
3360 {
3361 struct ring_buffer_per_cpu *cpu_buffer;
3362 int cpu = raw_smp_processor_id();
3363
3364 cpu_buffer = buffer->buffers[cpu];
3365
3366 rb_commit(cpu_buffer, event);
3367
3368 rb_wakeups(buffer, cpu_buffer);
3369
3370 trace_recursive_unlock(cpu_buffer);
3371
3372 preempt_enable_notrace();
3373
3374 return 0;
3375 }
3376 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3377
3378 /* Special value to validate all deltas on a page. */
3379 #define CHECK_FULL_PAGE 1L
3380
3381 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3382 static void dump_buffer_page(struct buffer_data_page *bpage,
3383 struct rb_event_info *info,
3384 unsigned long tail)
3385 {
3386 struct ring_buffer_event *event;
3387 u64 ts, delta;
3388 int e;
3389
3390 ts = bpage->time_stamp;
3391 pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
3392
3393 for (e = 0; e < tail; e += rb_event_length(event)) {
3394
3395 event = (struct ring_buffer_event *)(bpage->data + e);
3396
3397 switch (event->type_len) {
3398
3399 case RINGBUF_TYPE_TIME_EXTEND:
3400 delta = rb_event_time_stamp(event);
3401 ts += delta;
3402 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta);
3403 break;
3404
3405 case RINGBUF_TYPE_TIME_STAMP:
3406 delta = rb_event_time_stamp(event);
3407 ts = rb_fix_abs_ts(delta, ts);
3408 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta);
3409 break;
3410
3411 case RINGBUF_TYPE_PADDING:
3412 ts += event->time_delta;
3413 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta);
3414 break;
3415
3416 case RINGBUF_TYPE_DATA:
3417 ts += event->time_delta;
3418 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta);
3419 break;
3420
3421 default:
3422 break;
3423 }
3424 }
3425 }
3426
3427 static DEFINE_PER_CPU(atomic_t, checking);
3428 static atomic_t ts_dump;
3429
3430 /*
3431 * Check if the current event time stamp matches the deltas on
3432 * the buffer page.
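 * The check replays every delta on the page, starting from the page's
 * own time_stamp, and verifies that the sum matches the time stamp of
 * the event being added (or does not exceed it for a full page).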
3433 */
3434 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3435 struct rb_event_info *info,
3436 unsigned long tail)
3437 {
3438 struct ring_buffer_event *event;
3439 struct buffer_data_page *bpage;
3440 u64 ts, delta;
3441 bool full = false;
3442 int e;
3443
3444 bpage = info->tail_page->page;
3445
3446 if (tail == CHECK_FULL_PAGE) {
3447 full = true;
3448 tail = local_read(&bpage->commit);
3449 } else if (info->add_timestamp &
3450 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3451 /* Ignore events with absolute time stamps */
3452 return;
3453 }
3454
3455 /*
3456 * Do not check the first event (skip possible extends too).
3457 * Also do not check if previous events have not been committed.
3458 */
3459 if (tail <= 8 || tail > local_read(&bpage->commit))
3460 return;
3461
3462 /*
3463 * If this interrupted another check, skip it; we would be racing with it.
3464 */
3465 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3466 goto out;
3467
3468 ts = bpage->time_stamp;
3469
3470 for (e = 0; e < tail; e += rb_event_length(event)) {
3471
3472 event = (struct ring_buffer_event *)(bpage->data + e);
3473
3474 switch (event->type_len) {
3475
3476 case RINGBUF_TYPE_TIME_EXTEND:
3477 delta = rb_event_time_stamp(event);
3478 ts += delta;
3479 break;
3480
3481 case RINGBUF_TYPE_TIME_STAMP:
3482 delta = rb_event_time_stamp(event);
3483 ts = rb_fix_abs_ts(delta, ts);
3484 break;
3485
3486 case RINGBUF_TYPE_PADDING:
3487 if (event->time_delta == 1)
3488 break;
3489 fallthrough;
3490 case RINGBUF_TYPE_DATA:
3491 ts += event->time_delta;
3492 break;
3493
3494 default:
3495 RB_WARN_ON(cpu_buffer, 1);
3496 }
3497 }
3498 if ((full && ts > info->ts) ||
3499 (!full && ts + info->delta != info->ts)) {
3500 /* If another report is happening, ignore this one */
3501 if (atomic_inc_return(&ts_dump) != 1) {
3502 atomic_dec(&ts_dump);
3503 goto out;
3504 }
3505 atomic_inc(&cpu_buffer->record_disabled);
3506 /* There are some cases during boot up where this can happen */
3507 WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
3508 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
3509 cpu_buffer->cpu,
3510 ts + info->delta, info->ts, info->delta,
3511 info->before, info->after,
3512 full ? " (full)" : "");
3513 dump_buffer_page(bpage, info, tail);
3514 atomic_dec(&ts_dump);
3515 /* Do not re-enable checking */
3516 return;
3517 }
3518 out:
3519 atomic_dec(this_cpu_ptr(&checking));
3520 }
3521 #else
3522 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3523 struct rb_event_info *info,
3524 unsigned long tail)
3525 {
3526 }
3527 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3528
3529 static struct ring_buffer_event *
3530 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3531 struct rb_event_info *info)
3532 {
3533 struct ring_buffer_event *event;
3534 struct buffer_page *tail_page;
3535 unsigned long tail, write, w;
3536 bool a_ok;
3537 bool b_ok;
3538
3539 /* Don't let the compiler play games with cpu_buffer->tail_page */
3540 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3541
3542 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
3543 barrier();
3544 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3545 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3546 barrier();
3547 info->ts = rb_time_stamp(cpu_buffer->buffer);
3548
3549 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3550 info->delta = info->ts;
3551 } else {
3552 /*
3553 * If interrupting an event time update, we may need an
3554 * absolute timestamp.
3555 * Don't bother if this is the start of a new page (w == 0).
3556 */
3557 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3558 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3559 info->length += RB_LEN_TIME_EXTEND;
3560 } else {
3561 info->delta = info->ts - info->after;
3562 if (unlikely(test_time_stamp(info->delta))) {
3563 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3564 info->length += RB_LEN_TIME_EXTEND;
3565 }
3566 }
3567 }
3568
3569 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
3570
3571 /*C*/ write = local_add_return(info->length, &tail_page->write);
3572
3573 /* set write to only the index of the write */
3574 write &= RB_WRITE_MASK;
3575
3576 tail = write - info->length;
3577
3578 /* See if we shot past the end of this buffer page */
3579 if (unlikely(write > BUF_PAGE_SIZE)) {
3580 /* before and after may now be different, fix it up */
3581 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3582 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3583 if (a_ok && b_ok && info->before != info->after)
3584 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3585 info->before, info->after);
3586 if (a_ok && b_ok)
3587 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3588 return rb_move_tail(cpu_buffer, tail, info);
3589 }
3590
3591 if (likely(tail == w)) {
3592 u64 save_before;
3593 bool s_ok;
3594
3595 /* Nothing interrupted us between A and C */
3596 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
3597 barrier();
3598 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3599 RB_WARN_ON(cpu_buffer, !s_ok);
3600 if (likely(!(info->add_timestamp &
3601 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3602 /* This did not interrupt any time update */
3603 info->delta = info->ts - info->after;
3604 else
3605 /* Just use full timestamp for interrupting event */
3606 info->delta = info->ts;
3607 barrier();
3608 check_buffer(cpu_buffer, info, tail);
3609 if (unlikely(info->ts != save_before)) {
3610 /* SLOW PATH - Interrupted between C and E */
3611
3612 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3613 RB_WARN_ON(cpu_buffer, !a_ok);
3614
3615 /* Write stamp must only go forward */
3616 if (save_before > info->after) {
3617 /*
3618 * We do not care about the result, only that
3619 * it gets updated atomically.
3620 */
3621 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3622 info->after, save_before);
3623 }
3624 }
3625 } else {
3626 u64 ts;
3627 /* SLOW PATH - Interrupted between A and C */
3628 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3629 /* Was interrupted before here, write_stamp must be valid */
3630 RB_WARN_ON(cpu_buffer, !a_ok);
3631 ts = rb_time_stamp(cpu_buffer->buffer);
3632 barrier();
3633 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3634 info->after < ts &&
3635 rb_time_cmpxchg(&cpu_buffer->write_stamp,
3636 info->after, ts)) {
3637 /* Nothing came after this event between C and E */
3638 info->delta = ts - info->after;
3639 } else {
3640 /*
3641 * Interrupted between C and E:
3642 * Lost the previous event's time stamp. Just set the
3643 * delta to zero, and this will be the same time as
3644 * the event this event interrupted. And the events that
3645 * came after this will still be correct (as they would
3646 * have built their delta on the previous event).
3647 */
3648 info->delta = 0;
3649 }
3650 info->ts = ts;
3651 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3652 }
3653
3654 /*
3655 * If this is the first commit on the page, then it has the same
3656 * timestamp as the page itself.
3657 */
3658 if (unlikely(!tail && !(info->add_timestamp &
3659 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3660 info->delta = 0;
3661
3662 /* We reserved something on the buffer */
3663
3664 event = __rb_page_index(tail_page, tail);
3665 rb_update_event(cpu_buffer, event, info);
3666
3667 local_inc(&tail_page->entries);
3668
3669 /*
3670 * If this is the first commit on the page, then update
3671 * its timestamp.
3672 */
3673 if (unlikely(!tail))
3674 tail_page->page->time_stamp = info->ts;
3675
3676 /* account for these added bytes */
3677 local_add(info->length, &cpu_buffer->entries_bytes);
3678
3679 return event;
3680 }
3681
3682 static __always_inline struct ring_buffer_event *
3683 rb_reserve_next_event(struct trace_buffer *buffer,
3684 struct ring_buffer_per_cpu *cpu_buffer,
3685 unsigned long length)
3686 {
3687 struct ring_buffer_event *event;
3688 struct rb_event_info info;
3689 int nr_loops = 0;
3690 int add_ts_default;
3691
3692 rb_start_commit(cpu_buffer);
3693 /* The commit page can not change after this */
3694
3695 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3696 /*
3697 * Due to the ability to swap a cpu buffer from a buffer
3698 * it is possible it was swapped before we committed.
3699 * (committing stops a swap). We check for it here and
3700 * if it happened, we have to fail the write.
3701 */
3702 barrier();
3703 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3704 local_dec(&cpu_buffer->committing);
3705 local_dec(&cpu_buffer->commits);
3706 return NULL;
3707 }
3708 #endif
3709
3710 info.length = rb_calculate_event_length(length);
3711
3712 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3713 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3714 info.length += RB_LEN_TIME_EXTEND;
3715 } else {
3716 add_ts_default = RB_ADD_STAMP_NONE;
3717 }
3718
3719 again:
3720 info.add_timestamp = add_ts_default;
3721 info.delta = 0;
3722
3723 /*
3724 * We allow for interrupts to reenter here and do a trace.
3725 * If one does, it will cause this original code to loop
3726 * back here. Even with heavy interrupts happening, this
3727 * should only happen a few times in a row.
If this happens 3728 * 1000 times in a row, there must be either an interrupt 3729 * storm or we have something buggy. 3730 * Bail! 3731 */ 3732 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3733 goto out_fail; 3734 3735 event = __rb_reserve_next(cpu_buffer, &info); 3736 3737 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3738 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3739 info.length -= RB_LEN_TIME_EXTEND; 3740 goto again; 3741 } 3742 3743 if (likely(event)) 3744 return event; 3745 out_fail: 3746 rb_end_commit(cpu_buffer); 3747 return NULL; 3748 } 3749 3750 /** 3751 * ring_buffer_lock_reserve - reserve a part of the buffer 3752 * @buffer: the ring buffer to reserve from 3753 * @length: the length of the data to reserve (excluding event header) 3754 * 3755 * Returns a reserved event on the ring buffer to copy directly to. 3756 * The user of this interface will need to get the body to write into 3757 * and can use the ring_buffer_event_data() interface. 3758 * 3759 * The length is the length of the data needed, not the event length 3760 * which also includes the event header. 3761 * 3762 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3763 * If NULL is returned, then nothing has been allocated or locked. 3764 */ 3765 struct ring_buffer_event * 3766 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3767 { 3768 struct ring_buffer_per_cpu *cpu_buffer; 3769 struct ring_buffer_event *event; 3770 int cpu; 3771 3772 /* If we are tracing schedule, we don't want to recurse */ 3773 preempt_disable_notrace(); 3774 3775 if (unlikely(atomic_read(&buffer->record_disabled))) 3776 goto out; 3777 3778 cpu = raw_smp_processor_id(); 3779 3780 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3781 goto out; 3782 3783 cpu_buffer = buffer->buffers[cpu]; 3784 3785 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3786 goto out; 3787 3788 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3789 goto out; 3790 3791 if (unlikely(trace_recursive_lock(cpu_buffer))) 3792 goto out; 3793 3794 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3795 if (!event) 3796 goto out_unlock; 3797 3798 return event; 3799 3800 out_unlock: 3801 trace_recursive_unlock(cpu_buffer); 3802 out: 3803 preempt_enable_notrace(); 3804 return NULL; 3805 } 3806 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3807 3808 /* 3809 * Decrement the entries to the page that an event is on. 3810 * The event does not even need to exist, only the pointer 3811 * to the page it is on. This may only be called before the commit 3812 * takes place. 3813 */ 3814 static inline void 3815 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3816 struct ring_buffer_event *event) 3817 { 3818 unsigned long addr = (unsigned long)event; 3819 struct buffer_page *bpage = cpu_buffer->commit_page; 3820 struct buffer_page *start; 3821 3822 addr &= PAGE_MASK; 3823 3824 /* Do the likely case first */ 3825 if (likely(bpage->page == (void *)addr)) { 3826 local_dec(&bpage->entries); 3827 return; 3828 } 3829 3830 /* 3831 * Because the commit page may be on the reader page we 3832 * start with the next page and check the end loop there. 3833 */ 3834 rb_inc_page(&bpage); 3835 start = bpage; 3836 do { 3837 if (bpage->page == (void *)addr) { 3838 local_dec(&bpage->entries); 3839 return; 3840 } 3841 rb_inc_page(&bpage); 3842 } while (bpage != start); 3843 3844 /* commit not part of this buffer?? 
*/ 3845 RB_WARN_ON(cpu_buffer, 1); 3846 } 3847 3848 /** 3849 * ring_buffer_discard_commit - discard an event that has not been committed 3850 * @buffer: the ring buffer 3851 * @event: non committed event to discard 3852 * 3853 * Sometimes an event that is in the ring buffer needs to be ignored. 3854 * This function lets the user discard an event in the ring buffer 3855 * and then that event will not be read later. 3856 * 3857 * This function only works if it is called before the item has been 3858 * committed. It will try to free the event from the ring buffer 3859 * if another event has not been added behind it. 3860 * 3861 * If another event has been added behind it, it will set the event 3862 * up as discarded, and perform the commit. 3863 * 3864 * If this function is called, do not call ring_buffer_unlock_commit on 3865 * the event. 3866 */ 3867 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3868 struct ring_buffer_event *event) 3869 { 3870 struct ring_buffer_per_cpu *cpu_buffer; 3871 int cpu; 3872 3873 /* The event is discarded regardless */ 3874 rb_event_discard(event); 3875 3876 cpu = smp_processor_id(); 3877 cpu_buffer = buffer->buffers[cpu]; 3878 3879 /* 3880 * This must only be called if the event has not been 3881 * committed yet. Thus we can assume that preemption 3882 * is still disabled. 3883 */ 3884 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3885 3886 rb_decrement_entry(cpu_buffer, event); 3887 if (rb_try_to_discard(cpu_buffer, event)) 3888 goto out; 3889 3890 out: 3891 rb_end_commit(cpu_buffer); 3892 3893 trace_recursive_unlock(cpu_buffer); 3894 3895 preempt_enable_notrace(); 3896 3897 } 3898 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3899 3900 /** 3901 * ring_buffer_write - write data to the buffer without reserving 3902 * @buffer: The ring buffer to write to. 3903 * @length: The length of the data being written (excluding the event header) 3904 * @data: The data to write to the buffer. 3905 * 3906 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3907 * one function. If you already have the data to write to the buffer, it 3908 * may be easier to simply call this function. 3909 * 3910 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3911 * and not the length of the event which would hold the header. 
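 *
 * For example, a minimal usage sketch (my_buffer and struct my_event
 * are hypothetical, for illustration only):
 *
 *	struct my_event ev = { .val = 42 };
 *
 *	if (ring_buffer_write(my_buffer, sizeof(ev), &ev) < 0)
 *		pr_debug("event dropped\n");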
3912 */ 3913 int ring_buffer_write(struct trace_buffer *buffer, 3914 unsigned long length, 3915 void *data) 3916 { 3917 struct ring_buffer_per_cpu *cpu_buffer; 3918 struct ring_buffer_event *event; 3919 void *body; 3920 int ret = -EBUSY; 3921 int cpu; 3922 3923 preempt_disable_notrace(); 3924 3925 if (atomic_read(&buffer->record_disabled)) 3926 goto out; 3927 3928 cpu = raw_smp_processor_id(); 3929 3930 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3931 goto out; 3932 3933 cpu_buffer = buffer->buffers[cpu]; 3934 3935 if (atomic_read(&cpu_buffer->record_disabled)) 3936 goto out; 3937 3938 if (length > BUF_MAX_DATA_SIZE) 3939 goto out; 3940 3941 if (unlikely(trace_recursive_lock(cpu_buffer))) 3942 goto out; 3943 3944 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3945 if (!event) 3946 goto out_unlock; 3947 3948 body = rb_event_data(event); 3949 3950 memcpy(body, data, length); 3951 3952 rb_commit(cpu_buffer, event); 3953 3954 rb_wakeups(buffer, cpu_buffer); 3955 3956 ret = 0; 3957 3958 out_unlock: 3959 trace_recursive_unlock(cpu_buffer); 3960 3961 out: 3962 preempt_enable_notrace(); 3963 3964 return ret; 3965 } 3966 EXPORT_SYMBOL_GPL(ring_buffer_write); 3967 3968 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3969 { 3970 struct buffer_page *reader = cpu_buffer->reader_page; 3971 struct buffer_page *head = rb_set_head_page(cpu_buffer); 3972 struct buffer_page *commit = cpu_buffer->commit_page; 3973 3974 /* In case of error, head will be NULL */ 3975 if (unlikely(!head)) 3976 return true; 3977 3978 /* Reader should exhaust content in reader page */ 3979 if (reader->read != rb_page_commit(reader)) 3980 return false; 3981 3982 /* 3983 * If writers are committing on the reader page, knowing all 3984 * committed content has been read, the ring buffer is empty. 3985 */ 3986 if (commit == reader) 3987 return true; 3988 3989 /* 3990 * If writers are committing on a page other than reader page 3991 * and head page, there should always be content to read. 3992 */ 3993 if (commit != head) 3994 return false; 3995 3996 /* 3997 * Writers are committing on the head page, we just need 3998 * to care about there're committed data, and the reader will 3999 * swap reader page with head page when it is to read data. 4000 */ 4001 return rb_page_commit(commit) == 0; 4002 } 4003 4004 /** 4005 * ring_buffer_record_disable - stop all writes into the buffer 4006 * @buffer: The ring buffer to stop writes to. 4007 * 4008 * This prevents all writes to the buffer. Any attempt to write 4009 * to the buffer after this will fail and return NULL. 4010 * 4011 * The caller should call synchronize_rcu() after this. 4012 */ 4013 void ring_buffer_record_disable(struct trace_buffer *buffer) 4014 { 4015 atomic_inc(&buffer->record_disabled); 4016 } 4017 EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 4018 4019 /** 4020 * ring_buffer_record_enable - enable writes to the buffer 4021 * @buffer: The ring buffer to enable writes 4022 * 4023 * Note, multiple disables will need the same number of enables 4024 * to truly enable the writing (much like preempt_disable). 4025 */ 4026 void ring_buffer_record_enable(struct trace_buffer *buffer) 4027 { 4028 atomic_dec(&buffer->record_disabled); 4029 } 4030 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 4031 4032 /** 4033 * ring_buffer_record_off - stop all writes into the buffer 4034 * @buffer: The ring buffer to stop writes to. 4035 * 4036 * This prevents all writes to the buffer. Any attempt to write 4037 * to the buffer after this will fail and return NULL. 
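 *
 * For example (an illustrative sketch; the data_corrupted flag is
 * hypothetical), a debug hook could freeze the trace permanently:
 *
 *	if (WARN_ON_ONCE(data_corrupted))
 *		ring_buffer_record_off(buffer);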
4038 *
4039 * This is different than ring_buffer_record_disable() as
4040 * it works like an on/off switch, whereas the disable() version
4041 * must be paired with an enable().
4042 */
4043 void ring_buffer_record_off(struct trace_buffer *buffer)
4044 {
4045 unsigned int rd;
4046 unsigned int new_rd;
4047
4048 do {
4049 rd = atomic_read(&buffer->record_disabled);
4050 new_rd = rd | RB_BUFFER_OFF;
4051 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4052 }
4053 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4054
4055 /**
4056 * ring_buffer_record_on - restart writes into the buffer
4057 * @buffer: The ring buffer to start writes to.
4058 *
4059 * This enables all writes to the buffer that was disabled by
4060 * ring_buffer_record_off().
4061 *
4062 * This is different than ring_buffer_record_enable() as
4063 * it works like an on/off switch, whereas the enable() version
4064 * must be paired with a disable().
4065 */
4066 void ring_buffer_record_on(struct trace_buffer *buffer)
4067 {
4068 unsigned int rd;
4069 unsigned int new_rd;
4070
4071 do {
4072 rd = atomic_read(&buffer->record_disabled);
4073 new_rd = rd & ~RB_BUFFER_OFF;
4074 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4075 }
4076 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4077
4078 /**
4079 * ring_buffer_record_is_on - return true if the ring buffer can write
4080 * @buffer: The ring buffer to see if write is enabled
4081 *
4082 * Returns true if the ring buffer is in a state that it accepts writes.
4083 */
4084 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4085 {
4086 return !atomic_read(&buffer->record_disabled);
4087 }
4088
4089 /**
4090 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4091 * @buffer: The ring buffer to see if write is set enabled
4092 *
4093 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4094 * Note that this does NOT mean it is in a writable state.
4095 *
4096 * It may return true when the ring buffer has been disabled by
4097 * ring_buffer_record_disable(), as that is a temporary disabling of
4098 * the ring buffer.
4099 */
4100 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4101 {
4102 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4103 }
4104
4105 /**
4106 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4107 * @buffer: The ring buffer to stop writes to.
4108 * @cpu: The CPU buffer to stop
4109 *
4110 * This prevents all writes to the buffer. Any attempt to write
4111 * to the buffer after this will fail and return NULL.
4112 *
4113 * The caller should call synchronize_rcu() after this.
4114 */
4115 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4116 {
4117 struct ring_buffer_per_cpu *cpu_buffer;
4118
4119 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4120 return;
4121
4122 cpu_buffer = buffer->buffers[cpu];
4123 atomic_inc(&cpu_buffer->record_disabled);
4124 }
4125 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4126
4127 /**
4128 * ring_buffer_record_enable_cpu - enable writes to the buffer
4129 * @buffer: The ring buffer to enable writes
4130 * @cpu: The CPU to enable.
4131 *
4132 * Note, multiple disables will need the same number of enables
4133 * to truly enable the writing (much like preempt_disable).
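 *
 * For example (sketch), two disables need two enables:
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	ring_buffer_record_enable_cpu(buffer, cpu);
 *	(writes to this CPU are still disabled here)
 *	ring_buffer_record_enable_cpu(buffer, cpu);
 *	(now enabled again)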
4134 */ 4135 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4136 { 4137 struct ring_buffer_per_cpu *cpu_buffer; 4138 4139 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4140 return; 4141 4142 cpu_buffer = buffer->buffers[cpu]; 4143 atomic_dec(&cpu_buffer->record_disabled); 4144 } 4145 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4146 4147 /* 4148 * The total entries in the ring buffer is the running counter 4149 * of entries entered into the ring buffer, minus the sum of 4150 * the entries read from the ring buffer and the number of 4151 * entries that were overwritten. 4152 */ 4153 static inline unsigned long 4154 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4155 { 4156 return local_read(&cpu_buffer->entries) - 4157 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4158 } 4159 4160 /** 4161 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4162 * @buffer: The ring buffer 4163 * @cpu: The per CPU buffer to read from. 4164 */ 4165 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4166 { 4167 unsigned long flags; 4168 struct ring_buffer_per_cpu *cpu_buffer; 4169 struct buffer_page *bpage; 4170 u64 ret = 0; 4171 4172 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4173 return 0; 4174 4175 cpu_buffer = buffer->buffers[cpu]; 4176 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4177 /* 4178 * if the tail is on reader_page, oldest time stamp is on the reader 4179 * page 4180 */ 4181 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4182 bpage = cpu_buffer->reader_page; 4183 else 4184 bpage = rb_set_head_page(cpu_buffer); 4185 if (bpage) 4186 ret = bpage->page->time_stamp; 4187 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4188 4189 return ret; 4190 } 4191 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4192 4193 /** 4194 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4195 * @buffer: The ring buffer 4196 * @cpu: The per CPU buffer to read from. 4197 */ 4198 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4199 { 4200 struct ring_buffer_per_cpu *cpu_buffer; 4201 unsigned long ret; 4202 4203 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4204 return 0; 4205 4206 cpu_buffer = buffer->buffers[cpu]; 4207 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4208 4209 return ret; 4210 } 4211 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4212 4213 /** 4214 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4215 * @buffer: The ring buffer 4216 * @cpu: The per CPU buffer to get the entries from. 4217 */ 4218 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4219 { 4220 struct ring_buffer_per_cpu *cpu_buffer; 4221 4222 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4223 return 0; 4224 4225 cpu_buffer = buffer->buffers[cpu]; 4226 4227 return rb_num_of_entries(cpu_buffer); 4228 } 4229 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4230 4231 /** 4232 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4233 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
4234 * @buffer: The ring buffer 4235 * @cpu: The per CPU buffer to get the number of overruns from 4236 */ 4237 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4238 { 4239 struct ring_buffer_per_cpu *cpu_buffer; 4240 unsigned long ret; 4241 4242 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4243 return 0; 4244 4245 cpu_buffer = buffer->buffers[cpu]; 4246 ret = local_read(&cpu_buffer->overrun); 4247 4248 return ret; 4249 } 4250 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4251 4252 /** 4253 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4254 * commits failing due to the buffer wrapping around while there are uncommitted 4255 * events, such as during an interrupt storm. 4256 * @buffer: The ring buffer 4257 * @cpu: The per CPU buffer to get the number of overruns from 4258 */ 4259 unsigned long 4260 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4261 { 4262 struct ring_buffer_per_cpu *cpu_buffer; 4263 unsigned long ret; 4264 4265 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4266 return 0; 4267 4268 cpu_buffer = buffer->buffers[cpu]; 4269 ret = local_read(&cpu_buffer->commit_overrun); 4270 4271 return ret; 4272 } 4273 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4274 4275 /** 4276 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4277 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4278 * @buffer: The ring buffer 4279 * @cpu: The per CPU buffer to get the number of overruns from 4280 */ 4281 unsigned long 4282 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4283 { 4284 struct ring_buffer_per_cpu *cpu_buffer; 4285 unsigned long ret; 4286 4287 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4288 return 0; 4289 4290 cpu_buffer = buffer->buffers[cpu]; 4291 ret = local_read(&cpu_buffer->dropped_events); 4292 4293 return ret; 4294 } 4295 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4296 4297 /** 4298 * ring_buffer_read_events_cpu - get the number of events successfully read 4299 * @buffer: The ring buffer 4300 * @cpu: The per CPU buffer to get the number of events read 4301 */ 4302 unsigned long 4303 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4304 { 4305 struct ring_buffer_per_cpu *cpu_buffer; 4306 4307 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4308 return 0; 4309 4310 cpu_buffer = buffer->buffers[cpu]; 4311 return cpu_buffer->read; 4312 } 4313 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4314 4315 /** 4316 * ring_buffer_entries - get the number of entries in a buffer 4317 * @buffer: The ring buffer 4318 * 4319 * Returns the total number of entries in the ring buffer 4320 * (all CPU entries) 4321 */ 4322 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4323 { 4324 struct ring_buffer_per_cpu *cpu_buffer; 4325 unsigned long entries = 0; 4326 int cpu; 4327 4328 /* if you care about this being correct, lock the buffer */ 4329 for_each_buffer_cpu(buffer, cpu) { 4330 cpu_buffer = buffer->buffers[cpu]; 4331 entries += rb_num_of_entries(cpu_buffer); 4332 } 4333 4334 return entries; 4335 } 4336 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4337 4338 /** 4339 * ring_buffer_overruns - get the number of overruns in buffer 4340 * @buffer: The ring buffer 4341 * 4342 * Returns the total number of overruns in the ring buffer 4343 * (all CPU entries) 4344 */ 4345 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4346 { 4347 struct ring_buffer_per_cpu *cpu_buffer; 4348 unsigned long overruns = 0; 4349 int cpu; 4350 4351 /* 
if you care about this being correct, lock the buffer */ 4352 for_each_buffer_cpu(buffer, cpu) { 4353 cpu_buffer = buffer->buffers[cpu]; 4354 overruns += local_read(&cpu_buffer->overrun); 4355 } 4356 4357 return overruns; 4358 } 4359 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4360 4361 static void rb_iter_reset(struct ring_buffer_iter *iter) 4362 { 4363 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4364 4365 /* Iterator usage is expected to have record disabled */ 4366 iter->head_page = cpu_buffer->reader_page; 4367 iter->head = cpu_buffer->reader_page->read; 4368 iter->next_event = iter->head; 4369 4370 iter->cache_reader_page = iter->head_page; 4371 iter->cache_read = cpu_buffer->read; 4372 4373 if (iter->head) { 4374 iter->read_stamp = cpu_buffer->read_stamp; 4375 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4376 } else { 4377 iter->read_stamp = iter->head_page->page->time_stamp; 4378 iter->page_stamp = iter->read_stamp; 4379 } 4380 } 4381 4382 /** 4383 * ring_buffer_iter_reset - reset an iterator 4384 * @iter: The iterator to reset 4385 * 4386 * Resets the iterator, so that it will start from the beginning 4387 * again. 4388 */ 4389 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4390 { 4391 struct ring_buffer_per_cpu *cpu_buffer; 4392 unsigned long flags; 4393 4394 if (!iter) 4395 return; 4396 4397 cpu_buffer = iter->cpu_buffer; 4398 4399 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4400 rb_iter_reset(iter); 4401 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4402 } 4403 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4404 4405 /** 4406 * ring_buffer_iter_empty - check if an iterator has no more to read 4407 * @iter: The iterator to check 4408 */ 4409 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4410 { 4411 struct ring_buffer_per_cpu *cpu_buffer; 4412 struct buffer_page *reader; 4413 struct buffer_page *head_page; 4414 struct buffer_page *commit_page; 4415 struct buffer_page *curr_commit_page; 4416 unsigned commit; 4417 u64 curr_commit_ts; 4418 u64 commit_ts; 4419 4420 cpu_buffer = iter->cpu_buffer; 4421 reader = cpu_buffer->reader_page; 4422 head_page = cpu_buffer->head_page; 4423 commit_page = cpu_buffer->commit_page; 4424 commit_ts = commit_page->page->time_stamp; 4425 4426 /* 4427 * When the writer goes across pages, it issues a cmpxchg which 4428 * is a mb(), which will synchronize with the rmb here. 
4429 * (see rb_tail_page_update()) 4430 */ 4431 smp_rmb(); 4432 commit = rb_page_commit(commit_page); 4433 /* We want to make sure that the commit page doesn't change */ 4434 smp_rmb(); 4435 4436 /* Make sure commit page didn't change */ 4437 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4438 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4439 4440 /* If the commit page changed, then there's more data */ 4441 if (curr_commit_page != commit_page || 4442 curr_commit_ts != commit_ts) 4443 return 0; 4444 4445 /* Still racy, as it may return a false positive, but that's OK */ 4446 return ((iter->head_page == commit_page && iter->head >= commit) || 4447 (iter->head_page == reader && commit_page == head_page && 4448 head_page->read == commit && 4449 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4450 } 4451 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4452 4453 static void 4454 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4455 struct ring_buffer_event *event) 4456 { 4457 u64 delta; 4458 4459 switch (event->type_len) { 4460 case RINGBUF_TYPE_PADDING: 4461 return; 4462 4463 case RINGBUF_TYPE_TIME_EXTEND: 4464 delta = rb_event_time_stamp(event); 4465 cpu_buffer->read_stamp += delta; 4466 return; 4467 4468 case RINGBUF_TYPE_TIME_STAMP: 4469 delta = rb_event_time_stamp(event); 4470 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4471 cpu_buffer->read_stamp = delta; 4472 return; 4473 4474 case RINGBUF_TYPE_DATA: 4475 cpu_buffer->read_stamp += event->time_delta; 4476 return; 4477 4478 default: 4479 RB_WARN_ON(cpu_buffer, 1); 4480 } 4481 return; 4482 } 4483 4484 static void 4485 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4486 struct ring_buffer_event *event) 4487 { 4488 u64 delta; 4489 4490 switch (event->type_len) { 4491 case RINGBUF_TYPE_PADDING: 4492 return; 4493 4494 case RINGBUF_TYPE_TIME_EXTEND: 4495 delta = rb_event_time_stamp(event); 4496 iter->read_stamp += delta; 4497 return; 4498 4499 case RINGBUF_TYPE_TIME_STAMP: 4500 delta = rb_event_time_stamp(event); 4501 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4502 iter->read_stamp = delta; 4503 return; 4504 4505 case RINGBUF_TYPE_DATA: 4506 iter->read_stamp += event->time_delta; 4507 return; 4508 4509 default: 4510 RB_WARN_ON(iter->cpu_buffer, 1); 4511 } 4512 return; 4513 } 4514 4515 static struct buffer_page * 4516 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4517 { 4518 struct buffer_page *reader = NULL; 4519 unsigned long overwrite; 4520 unsigned long flags; 4521 int nr_loops = 0; 4522 int ret; 4523 4524 local_irq_save(flags); 4525 arch_spin_lock(&cpu_buffer->lock); 4526 4527 again: 4528 /* 4529 * This should normally only loop twice. But because the 4530 * start of the reader inserts an empty page, it causes 4531 * a case where we will loop three times. There should be no 4532 * reason to loop four times (that I know of). 
4533 */
4534 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4535 reader = NULL;
4536 goto out;
4537 }
4538
4539 reader = cpu_buffer->reader_page;
4540
4541 /* If there's more to read, return this page */
4542 if (cpu_buffer->reader_page->read < rb_page_size(reader))
4543 goto out;
4544
4545 /* Never should we have an index greater than the size */
4546 if (RB_WARN_ON(cpu_buffer,
4547 cpu_buffer->reader_page->read > rb_page_size(reader)))
4548 goto out;
4549
4550 /* check if we caught up to the tail */
4551 reader = NULL;
4552 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4553 goto out;
4554
4555 /* Don't bother swapping if the ring buffer is empty */
4556 if (rb_num_of_entries(cpu_buffer) == 0)
4557 goto out;
4558
4559 /*
4560 * Reset the reader page to size zero.
4561 */
4562 local_set(&cpu_buffer->reader_page->write, 0);
4563 local_set(&cpu_buffer->reader_page->entries, 0);
4564 local_set(&cpu_buffer->reader_page->page->commit, 0);
4565 cpu_buffer->reader_page->real_end = 0;
4566
4567 spin:
4568 /*
4569 * Splice the empty reader page into the list around the head.
4570 */
4571 reader = rb_set_head_page(cpu_buffer);
4572 if (!reader)
4573 goto out;
4574 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4575 cpu_buffer->reader_page->list.prev = reader->list.prev;
4576
4577 /*
4578 * cpu_buffer->pages just needs to point to the buffer, it
4579 * has no specific buffer page to point to. Let's move it out
4580 * of our way so we don't accidentally swap it.
4581 */
4582 cpu_buffer->pages = reader->list.prev;
4583
4584 /* The reader page will be pointing to the new head */
4585 rb_set_list_to_head(&cpu_buffer->reader_page->list);
4586
4587 /*
4588 * We want to make sure we read the overruns after we set up our
4589 * pointers to the next object. The writer side does a
4590 * cmpxchg to cross pages which acts as the mb on the writer
4591 * side. Note, the reader will constantly fail the swap
4592 * while the writer is updating the pointers, so this
4593 * guarantees that the overwrite recorded here is the one we
4594 * want to compare with the last_overrun.
4595 */
4596 smp_mb();
4597 overwrite = local_read(&(cpu_buffer->overrun));
4598
4599 /*
4600 * Here's the tricky part.
4601 *
4602 * We need to move the pointer past the header page.
4603 * But we can only do that if a writer is not currently
4604 * moving it. The page before the header page has the
4605 * flag bit '1' set if it is pointing to the page we want,
4606 * but if the writer is in the process of moving it
4607 * then it will be '2', or '0' if it has already moved.
4608 */
4609
4610 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4611
4612 /*
4613 * If we did not convert it, then we must try again.
4614 */
4615 if (!ret)
4616 goto spin;
4617
4618 /*
4619 * Yay! We succeeded in replacing the page.
4620 *
4621 * Now make the new head point back to the reader page.
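 *
 * (After this point the old head page has become the new reader
 * page, and the page that was the reader page is linked into the
 * ring in its place.)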
4622 */ 4623 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 4624 rb_inc_page(&cpu_buffer->head_page); 4625 4626 local_inc(&cpu_buffer->pages_read); 4627 4628 /* Finally update the reader page to the new head */ 4629 cpu_buffer->reader_page = reader; 4630 cpu_buffer->reader_page->read = 0; 4631 4632 if (overwrite != cpu_buffer->last_overrun) { 4633 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 4634 cpu_buffer->last_overrun = overwrite; 4635 } 4636 4637 goto again; 4638 4639 out: 4640 /* Update the read_stamp on the first event */ 4641 if (reader && reader->read == 0) 4642 cpu_buffer->read_stamp = reader->page->time_stamp; 4643 4644 arch_spin_unlock(&cpu_buffer->lock); 4645 local_irq_restore(flags); 4646 4647 /* 4648 * The writer has preempt disable, wait for it. But not forever 4649 * Although, 1 second is pretty much "forever" 4650 */ 4651 #define USECS_WAIT 1000000 4652 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4653 /* If the write is past the end of page, a writer is still updating it */ 4654 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4655 break; 4656 4657 udelay(1); 4658 4659 /* Get the latest version of the reader write value */ 4660 smp_rmb(); 4661 } 4662 4663 /* The writer is not moving forward? Something is wrong */ 4664 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4665 reader = NULL; 4666 4667 /* 4668 * Make sure we see any padding after the write update 4669 * (see rb_reset_tail()) 4670 */ 4671 smp_rmb(); 4672 4673 4674 return reader; 4675 } 4676 4677 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4678 { 4679 struct ring_buffer_event *event; 4680 struct buffer_page *reader; 4681 unsigned length; 4682 4683 reader = rb_get_reader_page(cpu_buffer); 4684 4685 /* This function should not be called when buffer is empty */ 4686 if (RB_WARN_ON(cpu_buffer, !reader)) 4687 return; 4688 4689 event = rb_reader_event(cpu_buffer); 4690 4691 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4692 cpu_buffer->read++; 4693 4694 rb_update_read_stamp(cpu_buffer, event); 4695 4696 length = rb_event_length(event); 4697 cpu_buffer->reader_page->read += length; 4698 } 4699 4700 static void rb_advance_iter(struct ring_buffer_iter *iter) 4701 { 4702 struct ring_buffer_per_cpu *cpu_buffer; 4703 4704 cpu_buffer = iter->cpu_buffer; 4705 4706 /* If head == next_event then we need to jump to the next event */ 4707 if (iter->head == iter->next_event) { 4708 /* If the event gets overwritten again, there's nothing to do */ 4709 if (rb_iter_head_event(iter) == NULL) 4710 return; 4711 } 4712 4713 iter->head = iter->next_event; 4714 4715 /* 4716 * Check if we are at the end of the buffer. 4717 */ 4718 if (iter->next_event >= rb_page_size(iter->head_page)) { 4719 /* discarded commits can make the page empty */ 4720 if (iter->head_page == cpu_buffer->commit_page) 4721 return; 4722 rb_inc_iter(iter); 4723 return; 4724 } 4725 4726 rb_update_iter_read_stamp(iter, iter->event); 4727 } 4728 4729 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4730 { 4731 return cpu_buffer->lost_events; 4732 } 4733 4734 static struct ring_buffer_event * 4735 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4736 unsigned long *lost_events) 4737 { 4738 struct ring_buffer_event *event; 4739 struct buffer_page *reader; 4740 int nr_loops = 0; 4741 4742 if (ts) 4743 *ts = 0; 4744 again: 4745 /* 4746 * We repeat when a time extend is encountered. 
4747 * Since the time extend is always attached to a data event,
4748 * we should never loop more than once.
4749 * (We never hit the following condition more than twice).
4750 */
4751 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4752 return NULL;
4753
4754 reader = rb_get_reader_page(cpu_buffer);
4755 if (!reader)
4756 return NULL;
4757
4758 event = rb_reader_event(cpu_buffer);
4759
4760 switch (event->type_len) {
4761 case RINGBUF_TYPE_PADDING:
4762 if (rb_null_event(event))
4763 RB_WARN_ON(cpu_buffer, 1);
4764 /*
4765 * Because the writer could be discarding every
4766 * event it creates (which would probably be bad),
4767 * if we were to go back to "again" then we may never
4768 * catch up, and will trigger the warn on, or lock
4769 * the box. Return the padding, and we will release
4770 * the current locks, and try again.
4771 */
4772 return event;
4773
4774 case RINGBUF_TYPE_TIME_EXTEND:
4775 /* Internal data, OK to advance */
4776 rb_advance_reader(cpu_buffer);
4777 goto again;
4778
4779 case RINGBUF_TYPE_TIME_STAMP:
4780 if (ts) {
4781 *ts = rb_event_time_stamp(event);
4782 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4783 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4784 cpu_buffer->cpu, ts);
4785 }
4786 /* Internal data, OK to advance */
4787 rb_advance_reader(cpu_buffer);
4788 goto again;
4789
4790 case RINGBUF_TYPE_DATA:
4791 if (ts && !(*ts)) {
4792 *ts = cpu_buffer->read_stamp + event->time_delta;
4793 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4794 cpu_buffer->cpu, ts);
4795 }
4796 if (lost_events)
4797 *lost_events = rb_lost_events(cpu_buffer);
4798 return event;
4799
4800 default:
4801 RB_WARN_ON(cpu_buffer, 1);
4802 }
4803
4804 return NULL;
4805 }
4806 EXPORT_SYMBOL_GPL(ring_buffer_peek);
4807
4808 static struct ring_buffer_event *
4809 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4810 {
4811 struct trace_buffer *buffer;
4812 struct ring_buffer_per_cpu *cpu_buffer;
4813 struct ring_buffer_event *event;
4814 int nr_loops = 0;
4815
4816 if (ts)
4817 *ts = 0;
4818
4819 cpu_buffer = iter->cpu_buffer;
4820 buffer = cpu_buffer->buffer;
4821
4822 /*
4823 * Check if someone performed a consuming read to
4824 * the buffer. A consuming read invalidates the iterator
4825 * and we need to reset the iterator in this case.
4826 */
4827 if (unlikely(iter->cache_read != cpu_buffer->read ||
4828 iter->cache_reader_page != cpu_buffer->reader_page))
4829 rb_iter_reset(iter);
4830
4831 again:
4832 if (ring_buffer_iter_empty(iter))
4833 return NULL;
4834
4835 /*
4836 * As the writer can mess with what the iterator is trying
4837 * to read, just give up if we fail to get an event after
4838 * three tries. The iterator is not as reliable when reading
4839 * the ring buffer with an active write as the consumer is.
4840 * Do not warn if three failures are reached.
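 * (Callers of this helper, such as ring_buffer_iter_peek(), simply
 * see a NULL return and may retry later.)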
4841 */
4842 if (++nr_loops > 3)
4843 return NULL;
4844
4845 if (rb_per_cpu_empty(cpu_buffer))
4846 return NULL;
4847
4848 if (iter->head >= rb_page_size(iter->head_page)) {
4849 rb_inc_iter(iter);
4850 goto again;
4851 }
4852
4853 event = rb_iter_head_event(iter);
4854 if (!event)
4855 goto again;
4856
4857 switch (event->type_len) {
4858 case RINGBUF_TYPE_PADDING:
4859 if (rb_null_event(event)) {
4860 rb_inc_iter(iter);
4861 goto again;
4862 }
4863 rb_advance_iter(iter);
4864 return event;
4865
4866 case RINGBUF_TYPE_TIME_EXTEND:
4867 /* Internal data, OK to advance */
4868 rb_advance_iter(iter);
4869 goto again;
4870
4871 case RINGBUF_TYPE_TIME_STAMP:
4872 if (ts) {
4873 *ts = rb_event_time_stamp(event);
4874 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4875 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4876 cpu_buffer->cpu, ts);
4877 }
4878 /* Internal data, OK to advance */
4879 rb_advance_iter(iter);
4880 goto again;
4881
4882 case RINGBUF_TYPE_DATA:
4883 if (ts && !(*ts)) {
4884 *ts = iter->read_stamp + event->time_delta;
4885 ring_buffer_normalize_time_stamp(buffer,
4886 cpu_buffer->cpu, ts);
4887 }
4888 return event;
4889
4890 default:
4891 RB_WARN_ON(cpu_buffer, 1);
4892 }
4893
4894 return NULL;
4895 }
4896 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4897
4898 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4899 {
4900 if (likely(!in_nmi())) {
4901 raw_spin_lock(&cpu_buffer->reader_lock);
4902 return true;
4903 }
4904
4905 /*
4906 * If an NMI die dumps out the content of the ring buffer,
4907 * trylock must be used to prevent a deadlock if the NMI
4908 * preempted a task that holds the ring buffer locks. If
4909 * we get the lock then all is fine; if not, then continue
4910 * to do the read, but this can corrupt the ring buffer,
4911 * so it must be permanently disabled from future writes.
4912 * Reading from NMI is a one-shot deal.
4913 */
4914 if (raw_spin_trylock(&cpu_buffer->reader_lock))
4915 return true;
4916
4917 /* Continue without locking, but disable the ring buffer */
4918 atomic_inc(&cpu_buffer->record_disabled);
4919 return false;
4920 }
4921
4922 static inline void
4923 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4924 {
4925 if (likely(locked))
4926 raw_spin_unlock(&cpu_buffer->reader_lock);
4927 return;
4928 }
4929
4930 /**
4931 * ring_buffer_peek - peek at the next event to be read
4932 * @buffer: The ring buffer to read
4933 * @cpu: The cpu to peek at
4934 * @ts: The timestamp counter of this event.
4935 * @lost_events: a variable to store if events were lost (may be NULL)
4936 *
4937 * This will return the event that will be read next, but does
4938 * not consume the data.
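 *
 * For example (sketch; process_event() is hypothetical):
 *
 *	u64 ts;
 *	unsigned long lost = 0;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		process_event(ring_buffer_event_data(event), ts);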
4939 */
4940 struct ring_buffer_event *
4941 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4942 unsigned long *lost_events)
4943 {
4944 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4945 struct ring_buffer_event *event;
4946 unsigned long flags;
4947 bool dolock;
4948
4949 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4950 return NULL;
4951
4952 again:
4953 local_irq_save(flags);
4954 dolock = rb_reader_lock(cpu_buffer);
4955 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4956 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4957 rb_advance_reader(cpu_buffer);
4958 rb_reader_unlock(cpu_buffer, dolock);
4959 local_irq_restore(flags);
4960
4961 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4962 goto again;
4963
4964 return event;
4965 }
4966
4967 /**
 * ring_buffer_iter_dropped - report if there are dropped events
4968 * @iter: The ring buffer iterator
4969 *
4970 * Returns true if there were dropped events since the last peek.
4971 */
4972 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4973 {
4974 bool ret = iter->missed_events != 0;
4975
4976 iter->missed_events = 0;
4977 return ret;
4978 }
4979 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4980
4981 /**
4982 * ring_buffer_iter_peek - peek at the next event to be read
4983 * @iter: The ring buffer iterator
4984 * @ts: The timestamp counter of this event.
4985 *
4986 * This will return the event that will be read next, but does
4987 * not increment the iterator.
4988 */
4989 struct ring_buffer_event *
4990 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4991 {
4992 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4993 struct ring_buffer_event *event;
4994 unsigned long flags;
4995
4996 again:
4997 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4998 event = rb_iter_peek(iter, ts);
4999 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5000
5001 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5002 goto again;
5003
5004 return event;
5005 }
5006
5007 /**
5008 * ring_buffer_consume - return an event and consume it
5009 * @buffer: The ring buffer to get the next event from
5010 * @cpu: the cpu to read the buffer from
5011 * @ts: a variable to store the timestamp (may be NULL)
5012 * @lost_events: a variable to store if events were lost (may be NULL)
5013 *
5014 * Returns the next event in the ring buffer, and that event is consumed.
5015 * Meaning that sequential reads will keep returning a different event,
5016 * and eventually empty the ring buffer if the producer is slower.
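 *
 * For example, draining everything currently in one CPU buffer
 * (sketch; handle_event() is hypothetical):
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
 *		handle_event(ring_buffer_event_data(event), ts);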
5017 */ 5018 struct ring_buffer_event * 5019 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 5020 unsigned long *lost_events) 5021 { 5022 struct ring_buffer_per_cpu *cpu_buffer; 5023 struct ring_buffer_event *event = NULL; 5024 unsigned long flags; 5025 bool dolock; 5026 5027 again: 5028 /* might be called in atomic */ 5029 preempt_disable(); 5030 5031 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5032 goto out; 5033 5034 cpu_buffer = buffer->buffers[cpu]; 5035 local_irq_save(flags); 5036 dolock = rb_reader_lock(cpu_buffer); 5037 5038 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 5039 if (event) { 5040 cpu_buffer->lost_events = 0; 5041 rb_advance_reader(cpu_buffer); 5042 } 5043 5044 rb_reader_unlock(cpu_buffer, dolock); 5045 local_irq_restore(flags); 5046 5047 out: 5048 preempt_enable(); 5049 5050 if (event && event->type_len == RINGBUF_TYPE_PADDING) 5051 goto again; 5052 5053 return event; 5054 } 5055 EXPORT_SYMBOL_GPL(ring_buffer_consume); 5056 5057 /** 5058 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 5059 * @buffer: The ring buffer to read from 5060 * @cpu: The cpu buffer to iterate over 5061 * @flags: gfp flags to use for memory allocation 5062 * 5063 * This performs the initial preparations necessary to iterate 5064 * through the buffer. Memory is allocated, buffer recording 5065 * is disabled, and the iterator pointer is returned to the caller. 5066 * 5067 * Disabling buffer recording prevents the reading from being 5068 * corrupted. This is not a consuming read, so a producer is not 5069 * expected. 5070 * 5071 * After a sequence of ring_buffer_read_prepare calls, the user is 5072 * expected to make at least one call to ring_buffer_read_prepare_sync. 5073 * Afterwards, ring_buffer_read_start is invoked to get things going 5074 * for real. 5075 * 5076 * This overall must be paired with ring_buffer_read_finish. 5077 */ 5078 struct ring_buffer_iter * 5079 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 5080 { 5081 struct ring_buffer_per_cpu *cpu_buffer; 5082 struct ring_buffer_iter *iter; 5083 5084 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5085 return NULL; 5086 5087 iter = kzalloc(sizeof(*iter), flags); 5088 if (!iter) 5089 return NULL; 5090 5091 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); 5092 if (!iter->event) { 5093 kfree(iter); 5094 return NULL; 5095 } 5096 5097 cpu_buffer = buffer->buffers[cpu]; 5098 5099 iter->cpu_buffer = cpu_buffer; 5100 5101 atomic_inc(&cpu_buffer->resize_disabled); 5102 5103 return iter; 5104 } 5105 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 5106 5107 /** 5108 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 5109 * 5110 * All previously invoked ring_buffer_read_prepare calls to prepare 5111 * iterators will be synchronized. Afterwards, read_buffer_read_start 5112 * calls on those iterators are allowed. 5113 */ 5114 void 5115 ring_buffer_read_prepare_sync(void) 5116 { 5117 synchronize_rcu(); 5118 } 5119 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 5120 5121 /** 5122 * ring_buffer_read_start - start a non consuming read of the buffer 5123 * @iter: The iterator returned by ring_buffer_read_prepare 5124 * 5125 * This finalizes the startup of an iteration through the buffer. 5126 * The iterator comes from a call to ring_buffer_read_prepare and 5127 * an intervening ring_buffer_read_prepare_sync must have been 5128 * performed. 5129 * 5130 * Must be paired with ring_buffer_read_finish. 
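 *
 * The full sequence is roughly (sketch; error handling omitted):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		(process the event)
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);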
5131 */ 5132 void 5133 ring_buffer_read_start(struct ring_buffer_iter *iter) 5134 { 5135 struct ring_buffer_per_cpu *cpu_buffer; 5136 unsigned long flags; 5137 5138 if (!iter) 5139 return; 5140 5141 cpu_buffer = iter->cpu_buffer; 5142 5143 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5144 arch_spin_lock(&cpu_buffer->lock); 5145 rb_iter_reset(iter); 5146 arch_spin_unlock(&cpu_buffer->lock); 5147 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5148 } 5149 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 5150 5151 /** 5152 * ring_buffer_read_finish - finish reading the iterator of the buffer 5153 * @iter: The iterator retrieved by ring_buffer_start 5154 * 5155 * This re-enables the recording to the buffer, and frees the 5156 * iterator. 5157 */ 5158 void 5159 ring_buffer_read_finish(struct ring_buffer_iter *iter) 5160 { 5161 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5162 unsigned long flags; 5163 5164 /* 5165 * Ring buffer is disabled from recording, here's a good place 5166 * to check the integrity of the ring buffer. 5167 * Must prevent readers from trying to read, as the check 5168 * clears the HEAD page and readers require it. 5169 */ 5170 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5171 rb_check_pages(cpu_buffer); 5172 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5173 5174 atomic_dec(&cpu_buffer->resize_disabled); 5175 kfree(iter->event); 5176 kfree(iter); 5177 } 5178 EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 5179 5180 /** 5181 * ring_buffer_iter_advance - advance the iterator to the next location 5182 * @iter: The ring buffer iterator 5183 * 5184 * Move the location of the iterator such that the next read will 5185 * be the next location of the iterator. 5186 */ 5187 void ring_buffer_iter_advance(struct ring_buffer_iter *iter) 5188 { 5189 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5190 unsigned long flags; 5191 5192 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5193 5194 rb_advance_iter(iter); 5195 5196 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5197 } 5198 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance); 5199 5200 /** 5201 * ring_buffer_size - return the size of the ring buffer (in bytes) 5202 * @buffer: The ring buffer. 5203 * @cpu: The CPU to get ring buffer size from. 5204 */ 5205 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) 5206 { 5207 /* 5208 * Earlier, this method returned 5209 * BUF_PAGE_SIZE * buffer->nr_pages 5210 * Since the nr_pages field is now removed, we have converted this to 5211 * return the per cpu buffer value. 
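 * A caller that still wants a total for the whole buffer can sum
 * this value over all CPUs itself.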
5212 */ 5213 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5214 return 0; 5215 5216 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5217 } 5218 EXPORT_SYMBOL_GPL(ring_buffer_size); 5219 5220 static void 5221 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5222 { 5223 rb_head_page_deactivate(cpu_buffer); 5224 5225 cpu_buffer->head_page 5226 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5227 local_set(&cpu_buffer->head_page->write, 0); 5228 local_set(&cpu_buffer->head_page->entries, 0); 5229 local_set(&cpu_buffer->head_page->page->commit, 0); 5230 5231 cpu_buffer->head_page->read = 0; 5232 5233 cpu_buffer->tail_page = cpu_buffer->head_page; 5234 cpu_buffer->commit_page = cpu_buffer->head_page; 5235 5236 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5237 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5238 local_set(&cpu_buffer->reader_page->write, 0); 5239 local_set(&cpu_buffer->reader_page->entries, 0); 5240 local_set(&cpu_buffer->reader_page->page->commit, 0); 5241 cpu_buffer->reader_page->read = 0; 5242 5243 local_set(&cpu_buffer->entries_bytes, 0); 5244 local_set(&cpu_buffer->overrun, 0); 5245 local_set(&cpu_buffer->commit_overrun, 0); 5246 local_set(&cpu_buffer->dropped_events, 0); 5247 local_set(&cpu_buffer->entries, 0); 5248 local_set(&cpu_buffer->committing, 0); 5249 local_set(&cpu_buffer->commits, 0); 5250 local_set(&cpu_buffer->pages_touched, 0); 5251 local_set(&cpu_buffer->pages_read, 0); 5252 cpu_buffer->last_pages_touch = 0; 5253 cpu_buffer->shortest_full = 0; 5254 cpu_buffer->read = 0; 5255 cpu_buffer->read_bytes = 0; 5256 5257 rb_time_set(&cpu_buffer->write_stamp, 0); 5258 rb_time_set(&cpu_buffer->before_stamp, 0); 5259 5260 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5261 5262 cpu_buffer->lost_events = 0; 5263 cpu_buffer->last_overrun = 0; 5264 5265 rb_head_page_activate(cpu_buffer); 5266 } 5267 5268 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5269 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5270 { 5271 unsigned long flags; 5272 5273 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5274 5275 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5276 goto out; 5277 5278 arch_spin_lock(&cpu_buffer->lock); 5279 5280 rb_reset_cpu(cpu_buffer); 5281 5282 arch_spin_unlock(&cpu_buffer->lock); 5283 5284 out: 5285 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5286 } 5287 5288 /** 5289 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5290 * @buffer: The ring buffer to reset a per cpu buffer of 5291 * @cpu: The CPU buffer to be reset 5292 */ 5293 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5294 { 5295 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5296 5297 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5298 return; 5299 5300 /* prevent another thread from changing buffer sizes */ 5301 mutex_lock(&buffer->mutex); 5302 5303 atomic_inc(&cpu_buffer->resize_disabled); 5304 atomic_inc(&cpu_buffer->record_disabled); 5305 5306 /* Make sure all commits have finished */ 5307 synchronize_rcu(); 5308 5309 reset_disabled_cpu_buffer(cpu_buffer); 5310 5311 atomic_dec(&cpu_buffer->record_disabled); 5312 atomic_dec(&cpu_buffer->resize_disabled); 5313 5314 mutex_unlock(&buffer->mutex); 5315 } 5316 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5317 5318 /** 5319 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5320 * @buffer: The ring buffer to reset a per cpu buffer of 5321 * @cpu: The CPU buffer to be reset 
5322 */ 5323 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5324 { 5325 struct ring_buffer_per_cpu *cpu_buffer; 5326 int cpu; 5327 5328 /* prevent another thread from changing buffer sizes */ 5329 mutex_lock(&buffer->mutex); 5330 5331 for_each_online_buffer_cpu(buffer, cpu) { 5332 cpu_buffer = buffer->buffers[cpu]; 5333 5334 atomic_inc(&cpu_buffer->resize_disabled); 5335 atomic_inc(&cpu_buffer->record_disabled); 5336 } 5337 5338 /* Make sure all commits have finished */ 5339 synchronize_rcu(); 5340 5341 for_each_online_buffer_cpu(buffer, cpu) { 5342 cpu_buffer = buffer->buffers[cpu]; 5343 5344 reset_disabled_cpu_buffer(cpu_buffer); 5345 5346 atomic_dec(&cpu_buffer->record_disabled); 5347 atomic_dec(&cpu_buffer->resize_disabled); 5348 } 5349 5350 mutex_unlock(&buffer->mutex); 5351 } 5352 5353 /** 5354 * ring_buffer_reset - reset a ring buffer 5355 * @buffer: The ring buffer to reset all cpu buffers 5356 */ 5357 void ring_buffer_reset(struct trace_buffer *buffer) 5358 { 5359 struct ring_buffer_per_cpu *cpu_buffer; 5360 int cpu; 5361 5362 /* prevent another thread from changing buffer sizes */ 5363 mutex_lock(&buffer->mutex); 5364 5365 for_each_buffer_cpu(buffer, cpu) { 5366 cpu_buffer = buffer->buffers[cpu]; 5367 5368 atomic_inc(&cpu_buffer->resize_disabled); 5369 atomic_inc(&cpu_buffer->record_disabled); 5370 } 5371 5372 /* Make sure all commits have finished */ 5373 synchronize_rcu(); 5374 5375 for_each_buffer_cpu(buffer, cpu) { 5376 cpu_buffer = buffer->buffers[cpu]; 5377 5378 reset_disabled_cpu_buffer(cpu_buffer); 5379 5380 atomic_dec(&cpu_buffer->record_disabled); 5381 atomic_dec(&cpu_buffer->resize_disabled); 5382 } 5383 5384 mutex_unlock(&buffer->mutex); 5385 } 5386 EXPORT_SYMBOL_GPL(ring_buffer_reset); 5387 5388 /** 5389 * ring_buffer_empty - is the ring buffer empty? 5390 * @buffer: The ring buffer to test 5391 */ 5392 bool ring_buffer_empty(struct trace_buffer *buffer) 5393 { 5394 struct ring_buffer_per_cpu *cpu_buffer; 5395 unsigned long flags; 5396 bool dolock; 5397 int cpu; 5398 int ret; 5399 5400 /* yes this is racy, but if you don't like the race, lock the buffer */ 5401 for_each_buffer_cpu(buffer, cpu) { 5402 cpu_buffer = buffer->buffers[cpu]; 5403 local_irq_save(flags); 5404 dolock = rb_reader_lock(cpu_buffer); 5405 ret = rb_per_cpu_empty(cpu_buffer); 5406 rb_reader_unlock(cpu_buffer, dolock); 5407 local_irq_restore(flags); 5408 5409 if (!ret) 5410 return false; 5411 } 5412 5413 return true; 5414 } 5415 EXPORT_SYMBOL_GPL(ring_buffer_empty); 5416 5417 /** 5418 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 
5419 * @buffer: The ring buffer 5420 * @cpu: The CPU buffer to test 5421 */ 5422 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5423 { 5424 struct ring_buffer_per_cpu *cpu_buffer; 5425 unsigned long flags; 5426 bool dolock; 5427 int ret; 5428 5429 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5430 return true; 5431 5432 cpu_buffer = buffer->buffers[cpu]; 5433 local_irq_save(flags); 5434 dolock = rb_reader_lock(cpu_buffer); 5435 ret = rb_per_cpu_empty(cpu_buffer); 5436 rb_reader_unlock(cpu_buffer, dolock); 5437 local_irq_restore(flags); 5438 5439 return ret; 5440 } 5441 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5442 5443 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5444 /** 5445 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5446 * @buffer_a: One buffer to swap with 5447 * @buffer_b: The other buffer to swap with 5448 * @cpu: the CPU of the buffers to swap 5449 * 5450 * This function is useful for tracers that want to take a "snapshot" 5451 * of a CPU buffer and has another back up buffer lying around. 5452 * it is expected that the tracer handles the cpu buffer not being 5453 * used at the moment. 5454 */ 5455 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5456 struct trace_buffer *buffer_b, int cpu) 5457 { 5458 struct ring_buffer_per_cpu *cpu_buffer_a; 5459 struct ring_buffer_per_cpu *cpu_buffer_b; 5460 int ret = -EINVAL; 5461 5462 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5463 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5464 goto out; 5465 5466 cpu_buffer_a = buffer_a->buffers[cpu]; 5467 cpu_buffer_b = buffer_b->buffers[cpu]; 5468 5469 /* At least make sure the two buffers are somewhat the same */ 5470 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5471 goto out; 5472 5473 ret = -EAGAIN; 5474 5475 if (atomic_read(&buffer_a->record_disabled)) 5476 goto out; 5477 5478 if (atomic_read(&buffer_b->record_disabled)) 5479 goto out; 5480 5481 if (atomic_read(&cpu_buffer_a->record_disabled)) 5482 goto out; 5483 5484 if (atomic_read(&cpu_buffer_b->record_disabled)) 5485 goto out; 5486 5487 /* 5488 * We can't do a synchronize_rcu here because this 5489 * function can be called in atomic context. 5490 * Normally this will be called from the same CPU as cpu. 5491 * If not it's up to the caller to protect this. 5492 */ 5493 atomic_inc(&cpu_buffer_a->record_disabled); 5494 atomic_inc(&cpu_buffer_b->record_disabled); 5495 5496 ret = -EBUSY; 5497 if (local_read(&cpu_buffer_a->committing)) 5498 goto out_dec; 5499 if (local_read(&cpu_buffer_b->committing)) 5500 goto out_dec; 5501 5502 buffer_a->buffers[cpu] = cpu_buffer_b; 5503 buffer_b->buffers[cpu] = cpu_buffer_a; 5504 5505 cpu_buffer_b->buffer = buffer_a; 5506 cpu_buffer_a->buffer = buffer_b; 5507 5508 ret = 0; 5509 5510 out_dec: 5511 atomic_dec(&cpu_buffer_a->record_disabled); 5512 atomic_dec(&cpu_buffer_b->record_disabled); 5513 out: 5514 return ret; 5515 } 5516 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5517 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5518 5519 /** 5520 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5521 * @buffer: the buffer to allocate for. 5522 * @cpu: the cpu buffer to allocate. 5523 * 5524 * This function is used in conjunction with ring_buffer_read_page. 5525 * When reading a full page from the ring buffer, these functions 5526 * can be used to speed up the process. The calling function should 5527 * allocate a few pages first with this function. 
Then when it
5528 * needs to get pages from the ring buffer, it passes the result
5529 * of this function into ring_buffer_read_page, which will swap
5530 * the page that was allocated with the read page of the buffer.
5531 *
5532 * Returns:
5533 * The page allocated, or an ERR_PTR on error.
5534 */
5535 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5536 {
5537 struct ring_buffer_per_cpu *cpu_buffer;
5538 struct buffer_data_page *bpage = NULL;
5539 unsigned long flags;
5540 struct page *page;
5541
5542 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5543 return ERR_PTR(-ENODEV);
5544
5545 cpu_buffer = buffer->buffers[cpu];
5546 local_irq_save(flags);
5547 arch_spin_lock(&cpu_buffer->lock);
5548
5549 if (cpu_buffer->free_page) {
5550 bpage = cpu_buffer->free_page;
5551 cpu_buffer->free_page = NULL;
5552 }
5553
5554 arch_spin_unlock(&cpu_buffer->lock);
5555 local_irq_restore(flags);
5556
5557 if (bpage)
5558 goto out;
5559
5560 page = alloc_pages_node(cpu_to_node(cpu),
5561 GFP_KERNEL | __GFP_NORETRY, 0);
5562 if (!page)
5563 return ERR_PTR(-ENOMEM);
5564
5565 bpage = page_address(page);
5566
5567 out:
5568 rb_init_page(bpage);
5569
5570 return bpage;
5571 }
5572 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5573
5574 /**
5575 * ring_buffer_free_read_page - free an allocated read page
5576 * @buffer: the buffer the page was allocated for
5577 * @cpu: the cpu buffer the page came from
5578 * @data: the page to free
5579 *
5580 * Free a page allocated from ring_buffer_alloc_read_page.
5581 */
5582 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5583 {
5584 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5585 struct buffer_data_page *bpage = data;
5586 struct page *page = virt_to_page(bpage);
5587 unsigned long flags;
5588
5589 /* If the page is still in use someplace else, we can't reuse it */
5590 if (page_ref_count(page) > 1)
5591 goto out;
5592
5593 local_irq_save(flags);
5594 arch_spin_lock(&cpu_buffer->lock);
5595
5596 if (!cpu_buffer->free_page) {
5597 cpu_buffer->free_page = bpage;
5598 bpage = NULL;
5599 }
5600
5601 arch_spin_unlock(&cpu_buffer->lock);
5602 local_irq_restore(flags);
5603
5604 out:
5605 free_page((unsigned long)bpage);
5606 }
5607 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5608
5609 /**
5610 * ring_buffer_read_page - extract a page from the ring buffer
5611 * @buffer: buffer to extract from
5612 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5613 * @len: amount to extract
5614 * @cpu: the cpu of the buffer to extract
5615 * @full: should the extraction only happen when the page is full.
5616 *
5617 * This function will pull out a page from the ring buffer and consume it.
5618 * @data_page must be the address of the variable that was returned
5619 * from ring_buffer_alloc_read_page. This is because the page might be used
5620 * to swap with a page in the ring buffer.
5621 *
5622 * for example:
5623 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
5624 * if (IS_ERR(rpage))
5625 * return PTR_ERR(rpage);
5626 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5627 * if (ret >= 0)
5628 * process_page(rpage, ret);
5629 *
5630 * When @full is set, the function will not succeed unless
5631 * the writer is off the reader page.
5632 *
5633 * Note: it is up to the calling functions to handle sleeps and wakeups.
5634 * The ring buffer can be used anywhere in the kernel and can not
5635 * blindly call wake_up.
The layer that uses the ring buffer must be 5636 * responsible for that. 5637 * 5638 * Returns: 5639 * >=0 if data has been transferred, returns the offset of consumed data. 5640 * <0 if no data has been transferred. 5641 */ 5642 int ring_buffer_read_page(struct trace_buffer *buffer, 5643 void **data_page, size_t len, int cpu, int full) 5644 { 5645 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5646 struct ring_buffer_event *event; 5647 struct buffer_data_page *bpage; 5648 struct buffer_page *reader; 5649 unsigned long missed_events; 5650 unsigned long flags; 5651 unsigned int commit; 5652 unsigned int read; 5653 u64 save_timestamp; 5654 int ret = -1; 5655 5656 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5657 goto out; 5658 5659 /* 5660 * If len is not big enough to hold the page header, then 5661 * we can not copy anything. 5662 */ 5663 if (len <= BUF_PAGE_HDR_SIZE) 5664 goto out; 5665 5666 len -= BUF_PAGE_HDR_SIZE; 5667 5668 if (!data_page) 5669 goto out; 5670 5671 bpage = *data_page; 5672 if (!bpage) 5673 goto out; 5674 5675 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5676 5677 reader = rb_get_reader_page(cpu_buffer); 5678 if (!reader) 5679 goto out_unlock; 5680 5681 event = rb_reader_event(cpu_buffer); 5682 5683 read = reader->read; 5684 commit = rb_page_commit(reader); 5685 5686 /* Check if any events were dropped */ 5687 missed_events = cpu_buffer->lost_events; 5688 5689 /* 5690 * If this page has been partially read or 5691 * if len is not big enough to read the rest of the page or 5692 * a writer is still on the page, then 5693 * we must copy the data from the page to the buffer. 5694 * Otherwise, we can simply swap the page with the one passed in. 5695 */ 5696 if (read || (len < (commit - read)) || 5697 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5698 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5699 unsigned int rpos = read; 5700 unsigned int pos = 0; 5701 unsigned int size; 5702 5703 /* 5704 * If a full page is expected, this can still be returned 5705 * if there's been a previous partial read and the 5706 * rest of the page can be read and the commit page is off 5707 * the reader page. 5708 */ 5709 if (full && 5710 (!read || (len < (commit - read)) || 5711 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5712 goto out_unlock; 5713 5714 if (len > (commit - read)) 5715 len = (commit - read); 5716 5717 /* Always keep the time extend and data together */ 5718 size = rb_event_ts_length(event); 5719 5720 if (len < size) 5721 goto out_unlock; 5722 5723 /* save the current timestamp, since the user will need it */ 5724 save_timestamp = cpu_buffer->read_stamp; 5725 5726 /* Need to copy one event at a time */ 5727 do { 5728 /* We need the size of one event, because 5729 * rb_advance_reader only advances by one event, 5730 * whereas rb_event_ts_length may include the size of 5731 * one or two events. 5732 * We have already ensured there's enough space if this 5733 * is a time extend. 

/*
 * We only allocate new buffers, never free them if the CPU goes down.
 * If we were to free the buffer, then the user would lose any trace that was in
 * the buffer.
 */
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct trace_buffer *buffer;
	long nr_pages_same;
	int cpu_i;
	unsigned long nr_pages;

	buffer = container_of(node, struct trace_buffer, node);
	if (cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	nr_pages = 0;
	nr_pages_same = 1;
	/* check if all cpu sizes are same */
	for_each_buffer_cpu(buffer, cpu_i) {
		/* fill in the size from first enabled cpu */
		if (nr_pages == 0)
			nr_pages = buffer->buffers[cpu_i]->nr_pages;
		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
			nr_pages_same = 0;
			break;
		}
	}
	/* allocate minimum pages, user can later expand it */
	if (!nr_pages_same)
		nr_pages = 2;
	buffer->buffers[cpu] =
		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu]) {
		WARN(1, "failed to allocate ring buffer on CPU %u\n",
		     cpu);
		return -ENOMEM;
	}
	smp_wmb();
	cpumask_set_cpu(cpu, buffer->cpumask);
	return 0;
}
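
/*
 * Illustrative sketch (not part of the original file): trace_rb_cpu_prepare()
 * is meant to be registered as a CPU-hotplug "prepare" callback by the ring
 * buffer's user. A minimal sketch, assuming a setup path like the one in
 * kernel/trace/trace.c, with each buffer attaching its own hlist node:
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *	...
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 *
 * With that in place, the callback above runs for every registered buffer
 * whenever a new CPU comes online.
 */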

#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct trace_buffer *buffer;
	unsigned long events;
	unsigned long bytes_written;
	unsigned long bytes_alloc;
	unsigned long bytes_dropped;
	unsigned long events_nested;
	unsigned long bytes_written_nested;
	unsigned long bytes_alloc_nested;
	unsigned long bytes_dropped_nested;
	int min_size_nested;
	int max_size_nested;
	int max_size;
	int min_size;
	int cpu;
	int cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};

static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes differ from what is normally written */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e, to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer, event);

	return 0;
}
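
/*
 * Worked example of the sizing in rb_write_something() above
 * (illustrative): with cnt == 50, cnt * 68 / 25 == 136, so size is 136
 * taken modulo the usable length of rb_string, and size +
 * sizeof(struct rb_item) bytes are reserved. Because 68/25 ~= 2.72 ~= e,
 * successive counts spread event sizes across the whole range instead of
 * cycling through a few values, and nested writes use cnt + 27 so they
 * differ from the normal writes on the same CPU.
 */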

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {
		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non-preempt, let others run */
		schedule();
	}

	return 0;
}
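
/*
 * Illustrative call chain (not code from this file): each pass of the
 * hammer loop above effectively does
 *
 *	smp_call_function(rb_ipi, NULL, 1);
 *	    -> rb_ipi() runs in IPI context on every other online CPU
 *	        -> rb_write_something(&rb_data[cpu], true);
 *
 * so interrupt-context writes nest inside whatever write the rbtester
 * thread on that CPU happens to be doing, which is what exercises the
 * buffer's nesting path.
 */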

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct trace_buffer *buffer;
	int cpu;
	int ret = 0;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
		return 0;
	}

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable the buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
						     cpu, "rbtester/%u");
		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
			pr_cont("FAILED\n");
			ret = PTR_ERR(rb_threads[cpu]);
			goto out_free;
		}
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(IS_ERR(rb_hammer))) {
		pr_cont("FAILED\n");
		ret = PTR_ERR(rb_hammer);
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show the buffer is enabled before setting rb_test_started.
	 * Yes, there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

 out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	/* Report! */
	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info(" events: %ld\n", total_events);
		pr_info(" dropped bytes: %ld\n", total_dropped);
		pr_info(" alloced bytes: %ld\n", total_alloc);
		pr_info(" written bytes: %ld\n", total_written);
		pr_info(" biggest event: %d\n", big_event_size);
		pr_info(" smallest event: %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected: %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info(" read events: %ld\n", total_read);
		pr_info(" lost events: %ld\n", total_lost);
		pr_info(" total events: %ld\n", total_lost + total_read);
		pr_info(" recorded len bytes: %ld\n", total_len);
		pr_info(" recorded size bytes: %ld\n", total_size);
		if (total_lost) {
			pr_info(" With dropped events, record len and size may not match\n"
				" alloced and written from above\n");
		} else {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
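
/*
 * Illustrative note (not part of the original file): with
 * CONFIG_RING_BUFFER_STARTUP_TEST=y the test above runs once as a
 * late_initcall during boot, and the pr_info() calls in
 * test_ringbuffer() produce console output of roughly this shape
 * (values vary per run):
 *
 *	Running ring buffer tests...
 *	finished
 *	CPU 0:
 *	 events: ...
 *	 dropped bytes: 0
 *	 ...
 *	Ring buffer PASSED!
 */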