// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page       |               |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
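/*
 * Illustrative sketch (not part of the original file): how the compressed
 * entry header encodes a data event's payload length, mirroring the logic
 * that rb_event_data_length() above decodes. Small payloads store their
 * length directly in the 5-bit type_len field in RB_ALIGNMENT units; larger
 * payloads set type_len to 0 and spill the byte count into array[0]. The
 * helper name is hypothetical.
 */
static inline void example_encode_data_length(struct ring_buffer_event *event,
					      unsigned length)
{
	if (length > RB_MAX_SMALL_DATA) {
		/* Too big for type_len: the byte count lives in array[0] */
		event->type_len = 0;
		event->array[0] = length;
	} else {
		/* Fits in type_len, in units of RB_ALIGNMENT (4 bytes) */
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
	}
}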
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data payload of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data payload of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
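/*
 * Illustrative sketch (not part of the original file): the inverse of
 * rb_event_time_stamp() above. A TIME_EXTEND/TIME_STAMP event carries a
 * large timestamp by splitting it at TS_SHIFT: the low 27 bits ride in the
 * header's time_delta field and the remaining high bits in array[0]. The
 * helper name is hypothetical, and it assumes @ts fits in the buffer's
 * 59-bit timestamp space.
 */
static inline void example_pack_time_stamp(struct ring_buffer_event *event,
					   u64 ts)
{
	event->time_delta = ts & TS_MASK;	/* low 27 bits */
	event->array[0] = ts >> TS_SHIFT;	/* remaining high bits */
}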
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
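/*
 * Illustrative sketch (not part of the original file): splitting the
 * combined "write" word described above into its two parts. The low
 * 20 bits are the write index on the page; the remaining upper bits
 * count in-progress updaters, incremented in RB_WRITE_INTCNT steps.
 * The helper names are hypothetical.
 */
static inline unsigned long example_write_index(unsigned long write)
{
	return write & RB_WRITE_MASK;		/* position on the page */
}

static inline unsigned long example_update_count(unsigned long write)
{
	return write >> 20;			/* nested updater count */
}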
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work		work;
	wait_queue_head_t	waiters;
	wait_queue_head_t	full_waiters;
	long			wait_index;
	bool			waiters_pending;
	bool			full_waiters_pending;
	bool			wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI        = 1
 *  IRQ        = 2
 *  SOFTIRQ    = 3
 *  NORMAL     = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};
#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 bit still uses local64_t).
 *
 * For the ring buffer, the 64 bit operations required for the time
 * stamp are the following:
 *
 * - Reads may fail if they interrupt a modification of the time stamp.
 *      A read will succeed if it did not interrupt another write, even
 *      if the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 * - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 * - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 *  (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half hold a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 *  top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}
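/*
 * Illustrative sketch (not part of the original file): why matching 2-bit
 * counters in the top and bottom halves prove a consistent read. Every
 * writer stamps both halves with the same counter value, so if the two
 * tags differ, the reader raced with a writer between the two loads and
 * the combined 60-bit value cannot be trusted. The helper name is
 * hypothetical.
 */
static inline bool example_halves_consistent(unsigned long top,
					     unsigned long bottom)
{
	/* Bits 30-31 of each half carry the writer's counter tag */
	return rb_time_cnt(top) == rb_time_cnt(bottom);
}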
static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret;

	ret = local_cmpxchg(l, expect, set);
	return ret == expect;
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	u64 val;
	val = local64_cmpxchg(&t->time, expect, set);
	return val == expect;
}
#endif
/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}

static inline u64 rb_time_stamp(struct trace_buffer *buffer);
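/*
 * Illustrative sketch (not part of the original file): rb_fix_abs_ts()
 * spelled out with a worked example. Suppose the last full time stamp was
 * 0x0800000000000001, i.e. one of the 5 MSBs (bit 59) is set. A later
 * absolute stamp stored in the buffer has those 5 MSBs dropped, so they
 * are re-attached from the saved stamp; if the 59-bit part wrapped in the
 * meantime, the result compares below the saved stamp and 2^59 is added
 * to compensate. The helper name is hypothetical.
 */
static inline u64 example_restore_abs_ts(u64 abs_59bit, u64 last_full_ts)
{
	/* Re-attach the 5 MSBs the buffer format cannot store */
	u64 full = abs_59bit | (last_full_ts & TS_MSB);

	if (full < last_full_ts)	/* 59-bit rollover happened */
		full += 1ULL << 59;
	return full;
}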
/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned;
 * otherwise the current time is returned. Neither of the last two
 * cases should really ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}
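/*
 * Illustrative sketch (not part of the original file): the percentage test
 * in full_hit() above, with concrete numbers. With 8 pages and full = 50
 * (percent), a waiter is woken once dirty * 100 > 50 * 8, i.e. once at
 * least five of the eight pages have content. The helper name is
 * hypothetical.
 */
static inline bool example_full_hit(size_t dirty, size_t nr_pages, int full)
{
	/* Integer math only: no floating point in the kernel */
	return (dirty * 100) > ((size_t)full * nr_pages);
}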
/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on, or RING_BUFFER_ALL_CPUS
 *
 * When a file that represents a ring buffer is being closed,
 * it is prudent to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	rb_wake_up_waiters(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			bool done;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);

			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
	} else {
		poll_wait(filp, &work->waiters, poll_table);
		work->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too.
 *
 * Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we cannot trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
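/*
 * Illustrative sketch (not part of the original file): reading the state
 * flags back out of a list pointer. Because buffer pages are cache-line
 * aligned, the two low bits of ->next are free to carry RB_PAGE_HEAD and
 * RB_PAGE_UPDATE, and rb_list_head() strips them before the pointer is
 * dereferenced. The helper name is hypothetical.
 */
static inline unsigned long example_list_flags(struct list_head *list)
{
	/* The tag travels in the pointer itself, not in the page */
	return (unsigned long)list->next & RB_FLAG_MASK;
}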
/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the head page behind
	 * where we started, and we miss it in one loop.
	 * A second loop should grab the head page, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return -1;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return -1;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return -1;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return -1;
	}

	return 0;
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			       long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * If a user thread allocates too much, and si_mem_available()
	 * reports there's enough memory, even though there is not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	if (head) {
		rb_head_page_deactivate(cpu_buffer);

		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}
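/*
 * Illustrative sketch (not part of the original file): why
 * rb_allocate_pages() above deletes its on-stack list head. The ring is a
 * circular list in which every node is a real buffer_page, so a full walk
 * starts from any page and stops when it returns to the starting page
 * rather than to a sentinel head. The helper name is hypothetical and the
 * walk is only safe while writers are stopped (e.g. during setup or
 * teardown).
 */
static inline unsigned long example_count_pages(struct list_head *pages)
{
	struct list_head *p = pages;
	unsigned long cnt = 0;

	do {
		cnt++;
		p = rb_list_head(p->next);	/* strip the flag bits */
	} while (p != pages);

	return cnt;
}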
/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
1967 * Make sure that the head_bit value is preserved for the 1968 * next page 1969 */ 1970 tail_page->next = (struct list_head *)((unsigned long)next_page | 1971 head_bit); 1972 next_page = rb_list_head(next_page); 1973 next_page->prev = tail_page; 1974 1975 /* make sure pages points to a valid page in the ring buffer */ 1976 cpu_buffer->pages = next_page; 1977 1978 /* update head page */ 1979 if (head_bit) 1980 cpu_buffer->head_page = list_entry(next_page, 1981 struct buffer_page, list); 1982 1983 /* 1984 * change read pointer to make sure any read iterators reset 1985 * themselves 1986 */ 1987 cpu_buffer->read = 0; 1988 1989 /* pages are removed, resume tracing and then free the pages */ 1990 atomic_dec(&cpu_buffer->record_disabled); 1991 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1992 1993 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 1994 1995 /* last buffer page to remove */ 1996 last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 1997 list); 1998 tmp_iter_page = first_page; 1999 2000 do { 2001 cond_resched(); 2002 2003 to_remove_page = tmp_iter_page; 2004 rb_inc_page(&tmp_iter_page); 2005 2006 /* update the counters */ 2007 page_entries = rb_page_entries(to_remove_page); 2008 if (page_entries) { 2009 /* 2010 * If something was added to this page, it was full 2011 * since it is not the tail page. So we deduct the 2012 * bytes consumed in the ring buffer from here. 2013 * Increment overrun to account for the lost events. 2014 */ 2015 local_add(page_entries, &cpu_buffer->overrun); 2016 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2017 local_inc(&cpu_buffer->pages_lost); 2018 } 2019 2020 /* 2021 * We have already removed references to this list item, just 2022 * free up the buffer_page and its page 2023 */ 2024 free_buffer_page(to_remove_page); 2025 nr_removed--; 2026 2027 } while (to_remove_page != last_page); 2028 2029 RB_WARN_ON(cpu_buffer, nr_removed); 2030 2031 return nr_removed == 0; 2032 } 2033 2034 static int 2035 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 2036 { 2037 struct list_head *pages = &cpu_buffer->new_pages; 2038 int retries, success; 2039 unsigned long flags; 2040 2041 /* Can be called at early boot up, where interrupts must not be enabled */ 2042 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2043 /* 2044 * We are holding the reader lock, so the reader page won't be swapped 2045 * in the ring buffer. Now we are racing with the writer trying to 2046 * move the head page and the tail page. 2047 * We are going to adapt the reader page update process where: 2048 * 1. We first splice the start and end of list of new pages between 2049 * the head page and its previous page. 2050 * 2. We cmpxchg the prev_page->next to point from head page to the 2051 * start of new pages list. 2052 * 3. Finally, we update the head->prev to the end of new list. 2053 * 2054 * We will try this process 10 times, to make sure that we don't keep 2055 * spinning.
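 *
 * The insert-by-cmpxchg step, reduced to a self-contained userspace
 * sketch (hypothetical names, GCC/Clang __atomic builtin):
 *
 *	struct node { struct node *next; };
 *
 *	// Splice first..last in front of *pprev iff it still equals old.
 *	static int try_splice(struct node **pprev, struct node *old,
 *			      struct node *first, struct node *last)
 *	{
 *		last->next = old;
 *		return __atomic_compare_exchange_n(pprev, &old, first, 0,
 *				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *	}
 *
 * On failure the head is re-read and the whole attempt is redone,
 * which is what the bounded retry loop below implements.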
2056 */ 2057 retries = 10; 2058 success = 0; 2059 while (retries--) { 2060 struct list_head *head_page, *prev_page, *r; 2061 struct list_head *last_page, *first_page; 2062 struct list_head *head_page_with_bit; 2063 2064 head_page = &rb_set_head_page(cpu_buffer)->list; 2065 if (!head_page) 2066 break; 2067 prev_page = head_page->prev; 2068 2069 first_page = pages->next; 2070 last_page = pages->prev; 2071 2072 head_page_with_bit = (struct list_head *) 2073 ((unsigned long)head_page | RB_PAGE_HEAD); 2074 2075 last_page->next = head_page_with_bit; 2076 first_page->prev = prev_page; 2077 2078 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 2079 2080 if (r == head_page_with_bit) { 2081 /* 2082 * yay, we replaced the page pointer to our new list; 2083 * now we just have to update the head page's prev 2084 * pointer to point to the end of the list 2085 */ 2086 head_page->prev = last_page; 2087 success = 1; 2088 break; 2089 } 2090 } 2091 2092 if (success) 2093 INIT_LIST_HEAD(pages); 2094 /* 2095 * If we weren't successful in adding the new pages, warn and stop 2096 * tracing 2097 */ 2098 RB_WARN_ON(cpu_buffer, !success); 2099 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2100 2101 /* free pages if they weren't inserted */ 2102 if (!success) { 2103 struct buffer_page *bpage, *tmp; 2104 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2105 list) { 2106 list_del_init(&bpage->list); 2107 free_buffer_page(bpage); 2108 } 2109 } 2110 return success; 2111 } 2112 2113 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2114 { 2115 int success; 2116 2117 if (cpu_buffer->nr_pages_to_update > 0) 2118 success = rb_insert_pages(cpu_buffer); 2119 else 2120 success = rb_remove_pages(cpu_buffer, 2121 -cpu_buffer->nr_pages_to_update); 2122 2123 if (success) 2124 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 2125 } 2126 2127 static void update_pages_handler(struct work_struct *work) 2128 { 2129 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 2130 struct ring_buffer_per_cpu, update_pages_work); 2131 rb_update_pages(cpu_buffer); 2132 complete(&cpu_buffer->update_done); 2133 } 2134 2135 /** 2136 * ring_buffer_resize - resize the ring buffer 2137 * @buffer: the buffer to resize. 2138 * @size: the new size. 2139 * @cpu_id: the cpu buffer to resize 2140 * 2141 * Minimum size is 2 * BUF_PAGE_SIZE. 2142 * 2143 * Returns 0 on success and < 0 on failure. 2144 */ 2145 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2146 int cpu_id) 2147 { 2148 struct ring_buffer_per_cpu *cpu_buffer; 2149 unsigned long nr_pages; 2150 int cpu, err; 2151 2152 /* 2153 * Always succeed at resizing a non-existent buffer: 2154 */ 2155 if (!buffer) 2156 return 0; 2157 2158 /* Make sure the requested buffer exists */ 2159 if (cpu_id != RING_BUFFER_ALL_CPUS && 2160 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 2161 return 0; 2162 2163 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 2164 2165 /* we need a minimum of two pages */ 2166 if (nr_pages < 2) 2167 nr_pages = 2; 2168 2169 /* prevent another thread from changing buffer sizes */ 2170 mutex_lock(&buffer->mutex); 2171 2172 2173 if (cpu_id == RING_BUFFER_ALL_CPUS) { 2174 /* 2175 * Don't succeed if resizing is disabled, as a reader might be 2176 * manipulating the ring buffer and is expecting a sane state while 2177 * this is true.
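 * (resize_disabled acts as a latch, not a lock: paths that need a
 * stable page list, e.g. the reset and read-prepare paths elsewhere
 * in this file, bracket their work in the same way as
 *
 *	atomic_inc(&cpu_buffer->resize_disabled);
 *	... operate on a frozen page list ...
 *	atomic_dec(&cpu_buffer->resize_disabled);
 *
 * and the checks below simply refuse to resize, with -EBUSY, while
 * any such section is active.)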
2178 */ 2179 for_each_buffer_cpu(buffer, cpu) { 2180 cpu_buffer = buffer->buffers[cpu]; 2181 if (atomic_read(&cpu_buffer->resize_disabled)) { 2182 err = -EBUSY; 2183 goto out_err_unlock; 2184 } 2185 } 2186 2187 /* calculate the pages to update */ 2188 for_each_buffer_cpu(buffer, cpu) { 2189 cpu_buffer = buffer->buffers[cpu]; 2190 2191 cpu_buffer->nr_pages_to_update = nr_pages - 2192 cpu_buffer->nr_pages; 2193 /* 2194 * nothing more to do for removing pages or no update 2195 */ 2196 if (cpu_buffer->nr_pages_to_update <= 0) 2197 continue; 2198 /* 2199 * to add pages, make sure all new pages can be 2200 * allocated without receiving ENOMEM 2201 */ 2202 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2203 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2204 &cpu_buffer->new_pages)) { 2205 /* not enough memory for new pages */ 2206 err = -ENOMEM; 2207 goto out_err; 2208 } 2209 } 2210 2211 cpus_read_lock(); 2212 /* 2213 * Fire off all the required work handlers 2214 * We can't schedule on offline CPUs, but it's not necessary 2215 * since we can change their buffer sizes without any race. 2216 */ 2217 for_each_buffer_cpu(buffer, cpu) { 2218 cpu_buffer = buffer->buffers[cpu]; 2219 if (!cpu_buffer->nr_pages_to_update) 2220 continue; 2221 2222 /* Can't run something on an offline CPU. */ 2223 if (!cpu_online(cpu)) { 2224 rb_update_pages(cpu_buffer); 2225 cpu_buffer->nr_pages_to_update = 0; 2226 } else { 2227 /* Run directly if possible. */ 2228 migrate_disable(); 2229 if (cpu != smp_processor_id()) { 2230 migrate_enable(); 2231 schedule_work_on(cpu, 2232 &cpu_buffer->update_pages_work); 2233 } else { 2234 update_pages_handler(&cpu_buffer->update_pages_work); 2235 migrate_enable(); 2236 } 2237 } 2238 } 2239 2240 /* wait for all the updates to complete */ 2241 for_each_buffer_cpu(buffer, cpu) { 2242 cpu_buffer = buffer->buffers[cpu]; 2243 if (!cpu_buffer->nr_pages_to_update) 2244 continue; 2245 2246 if (cpu_online(cpu)) 2247 wait_for_completion(&cpu_buffer->update_done); 2248 cpu_buffer->nr_pages_to_update = 0; 2249 } 2250 2251 cpus_read_unlock(); 2252 } else { 2253 cpu_buffer = buffer->buffers[cpu_id]; 2254 2255 if (nr_pages == cpu_buffer->nr_pages) 2256 goto out; 2257 2258 /* 2259 * Don't succeed if resizing is disabled, as a reader might be 2260 * manipulating the ring buffer and is expecting a sane state while 2261 * this is true. 2262 */ 2263 if (atomic_read(&cpu_buffer->resize_disabled)) { 2264 err = -EBUSY; 2265 goto out_err_unlock; 2266 } 2267 2268 cpu_buffer->nr_pages_to_update = nr_pages - 2269 cpu_buffer->nr_pages; 2270 2271 INIT_LIST_HEAD(&cpu_buffer->new_pages); 2272 if (cpu_buffer->nr_pages_to_update > 0 && 2273 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 2274 &cpu_buffer->new_pages)) { 2275 err = -ENOMEM; 2276 goto out_err; 2277 } 2278 2279 cpus_read_lock(); 2280 2281 /* Can't run something on an offline CPU. */ 2282 if (!cpu_online(cpu_id)) 2283 rb_update_pages(cpu_buffer); 2284 else { 2285 /* Run directly if possible. */ 2286 migrate_disable(); 2287 if (cpu_id == smp_processor_id()) { 2288 rb_update_pages(cpu_buffer); 2289 migrate_enable(); 2290 } else { 2291 migrate_enable(); 2292 schedule_work_on(cpu_id, 2293 &cpu_buffer->update_pages_work); 2294 wait_for_completion(&cpu_buffer->update_done); 2295 } 2296 } 2297 2298 cpu_buffer->nr_pages_to_update = 0; 2299 cpus_read_unlock(); 2300 } 2301 2302 out: 2303 /* 2304 * The ring buffer resize can happen with the ring buffer 2305 * enabled, so that the update disturbs the tracing as little 2306 * as possible. 
But if the buffer is disabled, we do not need 2307 * to worry about that, and we can take the time to verify 2308 * that the buffer is not corrupt. 2309 */ 2310 if (atomic_read(&buffer->record_disabled)) { 2311 atomic_inc(&buffer->record_disabled); 2312 /* 2313 * Even though the buffer was disabled, we must make sure 2314 * that it is truly disabled before calling rb_check_pages. 2315 * There could have been a race between checking 2316 * record_disable and incrementing it. 2317 */ 2318 synchronize_rcu(); 2319 for_each_buffer_cpu(buffer, cpu) { 2320 cpu_buffer = buffer->buffers[cpu]; 2321 rb_check_pages(cpu_buffer); 2322 } 2323 atomic_dec(&buffer->record_disabled); 2324 } 2325 2326 mutex_unlock(&buffer->mutex); 2327 return 0; 2328 2329 out_err: 2330 for_each_buffer_cpu(buffer, cpu) { 2331 struct buffer_page *bpage, *tmp; 2332 2333 cpu_buffer = buffer->buffers[cpu]; 2334 cpu_buffer->nr_pages_to_update = 0; 2335 2336 if (list_empty(&cpu_buffer->new_pages)) 2337 continue; 2338 2339 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2340 list) { 2341 list_del_init(&bpage->list); 2342 free_buffer_page(bpage); 2343 } 2344 } 2345 out_err_unlock: 2346 mutex_unlock(&buffer->mutex); 2347 return err; 2348 } 2349 EXPORT_SYMBOL_GPL(ring_buffer_resize); 2350 2351 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2352 { 2353 mutex_lock(&buffer->mutex); 2354 if (val) 2355 buffer->flags |= RB_FL_OVERWRITE; 2356 else 2357 buffer->flags &= ~RB_FL_OVERWRITE; 2358 mutex_unlock(&buffer->mutex); 2359 } 2360 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2361 2362 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 2363 { 2364 return bpage->page->data + index; 2365 } 2366 2367 static __always_inline struct ring_buffer_event * 2368 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 2369 { 2370 return __rb_page_index(cpu_buffer->reader_page, 2371 cpu_buffer->reader_page->read); 2372 } 2373 2374 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2375 { 2376 return local_read(&bpage->page->commit); 2377 } 2378 2379 static struct ring_buffer_event * 2380 rb_iter_head_event(struct ring_buffer_iter *iter) 2381 { 2382 struct ring_buffer_event *event; 2383 struct buffer_page *iter_head_page = iter->head_page; 2384 unsigned long commit; 2385 unsigned length; 2386 2387 if (iter->head != iter->next_event) 2388 return iter->event; 2389 2390 /* 2391 * When the writer goes across pages, it issues a cmpxchg which 2392 * is a mb(), which will synchronize with the rmb here. 2393 * (see rb_tail_page_update() and __rb_reserve_next()) 2394 */ 2395 commit = rb_page_commit(iter_head_page); 2396 smp_rmb(); 2397 event = __rb_page_index(iter_head_page, iter->head); 2398 length = rb_event_length(event); 2399 2400 /* 2401 * READ_ONCE() doesn't work on functions and we don't want the 2402 * compiler doing any crazy optimizations with length. 2403 */ 2404 barrier(); 2405 2406 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2407 /* Writer corrupted the read? */ 2408 goto reset; 2409 2410 memcpy(iter->event, event, length); 2411 /* 2412 * If the page stamp is still the same after this rmb() then the 2413 * event was safely copied without the writer entering the page. 
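 * The overall shape is the seqlock-style tolerant read, roughly:
 *
 *	stamp = page->time_stamp;	// taken when the iter entered the page
 *	smp_rmb();
 *	memcpy(copy, event, length);	// speculative copy
 *	smp_rmb();
 *	if (stamp != page->time_stamp)	// writer recycled the page?
 *		goto reset;		// discard the copy and restart
 *
 * where a mismatch only ever means "retry", never corruption seen by
 * the caller.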
2414 */ 2415 smp_rmb(); 2416 2417 /* Make sure the page didn't change since we read this */ 2418 if (iter->page_stamp != iter_head_page->page->time_stamp || 2419 commit > rb_page_commit(iter_head_page)) 2420 goto reset; 2421 2422 iter->next_event = iter->head + length; 2423 return iter->event; 2424 reset: 2425 /* Reset to the beginning */ 2426 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2427 iter->head = 0; 2428 iter->next_event = 0; 2429 iter->missed_events = 1; 2430 return NULL; 2431 } 2432 2433 /* Size is determined by what has been committed */ 2434 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2435 { 2436 return rb_page_commit(bpage); 2437 } 2438 2439 static __always_inline unsigned 2440 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2441 { 2442 return rb_page_commit(cpu_buffer->commit_page); 2443 } 2444 2445 static __always_inline unsigned 2446 rb_event_index(struct ring_buffer_event *event) 2447 { 2448 unsigned long addr = (unsigned long)event; 2449 2450 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 2451 } 2452 2453 static void rb_inc_iter(struct ring_buffer_iter *iter) 2454 { 2455 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2456 2457 /* 2458 * The iterator could be on the reader page (it starts there). 2459 * But the head could have moved, since the reader was 2460 * found. Check for this case and assign the iterator 2461 * to the head page instead of next. 2462 */ 2463 if (iter->head_page == cpu_buffer->reader_page) 2464 iter->head_page = rb_set_head_page(cpu_buffer); 2465 else 2466 rb_inc_page(&iter->head_page); 2467 2468 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2469 iter->head = 0; 2470 iter->next_event = 0; 2471 } 2472 2473 /* 2474 * rb_handle_head_page - writer hit the head page 2475 * 2476 * Returns: +1 to retry page 2477 * 0 to continue 2478 * -1 on error 2479 */ 2480 static int 2481 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 2482 struct buffer_page *tail_page, 2483 struct buffer_page *next_page) 2484 { 2485 struct buffer_page *new_head; 2486 int entries; 2487 int type; 2488 int ret; 2489 2490 entries = rb_page_entries(next_page); 2491 2492 /* 2493 * The hard part is here. We need to move the head 2494 * forward, and protect against both readers on 2495 * other CPUs and writers coming in via interrupts. 2496 */ 2497 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 2498 RB_PAGE_HEAD); 2499 2500 /* 2501 * type can be one of four: 2502 * NORMAL - an interrupt already moved it for us 2503 * HEAD - we are the first to get here. 2504 * UPDATE - we are the interrupt interrupting 2505 * a current move. 2506 * MOVED - a reader on another CPU moved the next 2507 * pointer to its reader page. Give up 2508 * and try again. 2509 */ 2510 2511 switch (type) { 2512 case RB_PAGE_HEAD: 2513 /* 2514 * We changed the head to UPDATE, thus 2515 * it is our responsibility to update 2516 * the counters. 2517 */ 2518 local_add(entries, &cpu_buffer->overrun); 2519 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 2520 local_inc(&cpu_buffer->pages_lost); 2521 2522 /* 2523 * The entries will be zeroed out when we move the 2524 * tail page. 2525 */ 2526 2527 /* still more to do */ 2528 break; 2529 2530 case RB_PAGE_UPDATE: 2531 /* 2532 * This is an interrupt that interrupted the 2533 * previous update. Still more to do. 2534 */ 2535 break; 2536 case RB_PAGE_NORMAL: 2537 /* 2538 * An interrupt came in before the update 2539 * and processed this for us.
2540 * Nothing left to do. 2541 */ 2542 return 1; 2543 case RB_PAGE_MOVED: 2544 /* 2545 * The reader is on another CPU and just did 2546 * a swap with our next_page. 2547 * Try again. 2548 */ 2549 return 1; 2550 default: 2551 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 2552 return -1; 2553 } 2554 2555 /* 2556 * Now that we are here, the old head pointer is 2557 * set to UPDATE. This will keep the reader from 2558 * swapping the head page with the reader page. 2559 * The reader (on another CPU) will spin till 2560 * we are finished. 2561 * 2562 * We just need to protect against interrupts 2563 * doing the job. We will set the next pointer 2564 * to HEAD. After that, we set the old pointer 2565 * to NORMAL, but only if it was HEAD before; 2566 * otherwise we are an interrupt, and only 2567 * want the outermost commit to reset it. 2568 */ 2569 new_head = next_page; 2570 rb_inc_page(&new_head); 2571 2572 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 2573 RB_PAGE_NORMAL); 2574 2575 /* 2576 * Valid returns are: 2577 * HEAD - an interrupt came in and already set it. 2578 * NORMAL - One of two things: 2579 * 1) We really set it. 2580 * 2) A bunch of interrupts came in and moved 2581 * the page forward again. 2582 */ 2583 switch (ret) { 2584 case RB_PAGE_HEAD: 2585 case RB_PAGE_NORMAL: 2586 /* OK */ 2587 break; 2588 default: 2589 RB_WARN_ON(cpu_buffer, 1); 2590 return -1; 2591 } 2592 2593 /* 2594 * It is possible that an interrupt came in, 2595 * set the head up, then more interrupts came in 2596 * and moved it again. When we get back here, 2597 * the page would have been set to NORMAL but we 2598 * just set it back to HEAD. 2599 * 2600 * How do you detect this? Well, if that happened 2601 * the tail page would have moved. 2602 */ 2603 if (ret == RB_PAGE_NORMAL) { 2604 struct buffer_page *buffer_tail_page; 2605 2606 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 2607 /* 2608 * If the tail had moved past next, then we need 2609 * to reset the pointer. 2610 */ 2611 if (buffer_tail_page != tail_page && 2612 buffer_tail_page != next_page) 2613 rb_head_page_set_normal(cpu_buffer, new_head, 2614 next_page, 2615 RB_PAGE_HEAD); 2616 } 2617 2618 /* 2619 * If this was the outermost commit (the one that 2620 * changed the original pointer from HEAD to UPDATE), 2621 * then it is up to us to reset it to NORMAL. 2622 */ 2623 if (type == RB_PAGE_HEAD) { 2624 ret = rb_head_page_set_normal(cpu_buffer, next_page, 2625 tail_page, 2626 RB_PAGE_UPDATE); 2627 if (RB_WARN_ON(cpu_buffer, 2628 ret != RB_PAGE_UPDATE)) 2629 return -1; 2630 } 2631 2632 return 0; 2633 } 2634 2635 static inline void 2636 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2637 unsigned long tail, struct rb_event_info *info) 2638 { 2639 struct buffer_page *tail_page = info->tail_page; 2640 struct ring_buffer_event *event; 2641 unsigned long length = info->length; 2642 2643 /* 2644 * Only the event that crossed the page boundary 2645 * must fill the old tail_page with padding. 2646 */ 2647 if (tail >= BUF_PAGE_SIZE) { 2648 /* 2649 * If the page was filled, then we still need 2650 * to update the real_end. Reset it to zero 2651 * and the reader will ignore it. 2652 */ 2653 if (tail == BUF_PAGE_SIZE) 2654 tail_page->real_end = 0; 2655 2656 local_sub(length, &tail_page->write); 2657 return; 2658 } 2659 2660 event = __rb_page_index(tail_page, tail); 2661 2662 /* account for padding bytes */ 2663 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2664 2665 /* 2666 * Save the original length to the meta data.
2667 * This will be used by the reader to add lost event 2668 * counter. 2669 */ 2670 tail_page->real_end = tail; 2671 2672 /* 2673 * If this event is bigger than the minimum size, then 2674 * we need to be careful that we don't subtract the 2675 * write counter enough to allow another writer to slip 2676 * in on this page. 2677 * We put in a discarded commit instead, to make sure 2678 * that this space is not used again. 2679 * 2680 * If we are less than the minimum size, we don't need to 2681 * worry about it. 2682 */ 2683 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2684 /* No room for any events */ 2685 2686 /* Mark the rest of the page with padding */ 2687 rb_event_set_padding(event); 2688 2689 /* Make sure the padding is visible before the write update */ 2690 smp_wmb(); 2691 2692 /* Set the write back to the previous setting */ 2693 local_sub(length, &tail_page->write); 2694 return; 2695 } 2696 2697 /* Put in a discarded event */ 2698 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2699 event->type_len = RINGBUF_TYPE_PADDING; 2700 /* time delta must be non zero */ 2701 event->time_delta = 1; 2702 2703 /* Make sure the padding is visible before the tail_page->write update */ 2704 smp_wmb(); 2705 2706 /* Set write to end of buffer */ 2707 length = (tail + length) - BUF_PAGE_SIZE; 2708 local_sub(length, &tail_page->write); 2709 } 2710 2711 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 2712 2713 /* 2714 * This is the slow path, force gcc not to inline it. 2715 */ 2716 static noinline struct ring_buffer_event * 2717 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2718 unsigned long tail, struct rb_event_info *info) 2719 { 2720 struct buffer_page *tail_page = info->tail_page; 2721 struct buffer_page *commit_page = cpu_buffer->commit_page; 2722 struct trace_buffer *buffer = cpu_buffer->buffer; 2723 struct buffer_page *next_page; 2724 int ret; 2725 2726 next_page = tail_page; 2727 2728 rb_inc_page(&next_page); 2729 2730 /* 2731 * If for some reason, we had an interrupt storm that made 2732 * it all the way around the buffer, bail, and warn 2733 * about it. 2734 */ 2735 if (unlikely(next_page == commit_page)) { 2736 local_inc(&cpu_buffer->commit_overrun); 2737 goto out_reset; 2738 } 2739 2740 /* 2741 * This is where the fun begins! 2742 * 2743 * We are fighting against races between a reader that 2744 * could be on another CPU trying to swap its reader 2745 * page with the buffer head. 2746 * 2747 * We are also fighting against interrupts coming in and 2748 * moving the head or tail on us as well. 2749 * 2750 * If the next page is the head page then we have filled 2751 * the buffer, unless the commit page is still on the 2752 * reader page. 2753 */ 2754 if (rb_is_head_page(next_page, &tail_page->list)) { 2755 2756 /* 2757 * If the commit is not on the reader page, then 2758 * move the header page. 2759 */ 2760 if (!rb_is_reader_page(cpu_buffer->commit_page)) { 2761 /* 2762 * If we are not in overwrite mode, 2763 * this is easy, just stop here. 2764 */ 2765 if (!(buffer->flags & RB_FL_OVERWRITE)) { 2766 local_inc(&cpu_buffer->dropped_events); 2767 goto out_reset; 2768 } 2769 2770 ret = rb_handle_head_page(cpu_buffer, 2771 tail_page, 2772 next_page); 2773 if (ret < 0) 2774 goto out_reset; 2775 if (ret) 2776 goto out_again; 2777 } else { 2778 /* 2779 * We need to be careful here too. The 2780 * commit page could still be on the reader 2781 * page. 
We could have a small buffer, and 2782 * have filled up the buffer with events 2783 * from interrupts and such, and wrapped. 2784 * 2785 * Note, if the tail page is also on the 2786 * reader_page, we let it move out. 2787 */ 2788 if (unlikely((cpu_buffer->commit_page != 2789 cpu_buffer->tail_page) && 2790 (cpu_buffer->commit_page == 2791 cpu_buffer->reader_page))) { 2792 local_inc(&cpu_buffer->commit_overrun); 2793 goto out_reset; 2794 } 2795 } 2796 } 2797 2798 rb_tail_page_update(cpu_buffer, tail_page, next_page); 2799 2800 out_again: 2801 2802 rb_reset_tail(cpu_buffer, tail, info); 2803 2804 /* Commit what we have for now. */ 2805 rb_end_commit(cpu_buffer); 2806 /* rb_end_commit() decs committing */ 2807 local_inc(&cpu_buffer->committing); 2808 2809 /* fail and let the caller try again */ 2810 return ERR_PTR(-EAGAIN); 2811 2812 out_reset: 2813 /* reset write */ 2814 rb_reset_tail(cpu_buffer, tail, info); 2815 2816 return NULL; 2817 } 2818 2819 /* Slow path */ 2820 static struct ring_buffer_event * 2821 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2822 { 2823 if (abs) 2824 event->type_len = RINGBUF_TYPE_TIME_STAMP; 2825 else 2826 event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2827 2828 /* Not the first event on the page, or not delta? */ 2829 if (abs || rb_event_index(event)) { 2830 event->time_delta = delta & TS_MASK; 2831 event->array[0] = delta >> TS_SHIFT; 2832 } else { 2833 /* nope, just zero it */ 2834 event->time_delta = 0; 2835 event->array[0] = 0; 2836 } 2837 2838 return skip_time_extend(event); 2839 } 2840 2841 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2842 static inline bool sched_clock_stable(void) 2843 { 2844 return true; 2845 } 2846 #endif 2847 2848 static void 2849 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2850 struct rb_event_info *info) 2851 { 2852 u64 write_stamp; 2853 2854 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 2855 (unsigned long long)info->delta, 2856 (unsigned long long)info->ts, 2857 (unsigned long long)info->before, 2858 (unsigned long long)info->after, 2859 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 2860 sched_clock_stable() ? "" : 2861 "If you just came from a suspend/resume,\n" 2862 "please switch to the trace global clock:\n" 2863 " echo global > /sys/kernel/tracing/trace_clock\n" 2864 "or add trace_clock=global to the kernel command line\n"); 2865 } 2866 2867 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 2868 struct ring_buffer_event **event, 2869 struct rb_event_info *info, 2870 u64 *delta, 2871 unsigned int *length) 2872 { 2873 bool abs = info->add_timestamp & 2874 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 2875 2876 if (unlikely(info->delta > (1ULL << 59))) { 2877 /* 2878 * Some timers can use more than 59 bits, and when a timestamp 2879 * is added to the buffer, it will lose those bits. 2880 */ 2881 if (abs && (info->ts & TS_MSB)) { 2882 info->delta &= ABS_TS_MASK; 2883 2884 /* did the clock go backwards */ 2885 } else if (info->before == info->after && info->before > info->ts) { 2886 /* not interrupted */ 2887 static int once; 2888 2889 /* 2890 * This is possible with a recalibrating of the TSC. 2891 * Do not produce a call stack, but just report it. 
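 * (The open-coded 'once' flag below is equivalent to the generic
 * helper, shown here only for comparison:
 *
 *	pr_warn_once("Ring buffer clock went backwards: %llu -> %llu\n",
 *		     info->before, info->ts);
 *
 * either form reports a single line the first time the condition fires.)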
2892 */ 2893 if (!once) { 2894 once++; 2895 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 2896 info->before, info->ts); 2897 } 2898 } else 2899 rb_check_timestamp(cpu_buffer, info); 2900 if (!abs) 2901 info->delta = 0; 2902 } 2903 *event = rb_add_time_stamp(*event, info->delta, abs); 2904 *length -= RB_LEN_TIME_EXTEND; 2905 *delta = 0; 2906 } 2907 2908 /** 2909 * rb_update_event - update event type and data 2910 * @cpu_buffer: The per cpu buffer of the @event 2911 * @event: the event to update 2912 * @info: The info to update the @event with (contains length and delta) 2913 * 2914 * Update the type and data fields of the @event. The length 2915 * is the actual size that is written to the ring buffer, 2916 * and with this, we can determine what to place into the 2917 * data field. 2918 */ 2919 static void 2920 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2921 struct ring_buffer_event *event, 2922 struct rb_event_info *info) 2923 { 2924 unsigned length = info->length; 2925 u64 delta = info->delta; 2926 unsigned int nest = local_read(&cpu_buffer->committing) - 1; 2927 2928 if (!WARN_ON_ONCE(nest >= MAX_NEST)) 2929 cpu_buffer->event_stamp[nest] = info->ts; 2930 2931 /* 2932 * If we need to add a timestamp, then we 2933 * add it to the start of the reserved space. 2934 */ 2935 if (unlikely(info->add_timestamp)) 2936 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2937 2938 event->time_delta = delta; 2939 length -= RB_EVNT_HDR_SIZE; 2940 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2941 event->type_len = 0; 2942 event->array[0] = length; 2943 } else 2944 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2945 } 2946 2947 static unsigned rb_calculate_event_length(unsigned length) 2948 { 2949 struct ring_buffer_event event; /* Used only for sizeof array */ 2950 2951 /* zero length can cause confusions */ 2952 if (!length) 2953 length++; 2954 2955 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2956 length += sizeof(event.array[0]); 2957 2958 length += RB_EVNT_HDR_SIZE; 2959 length = ALIGN(length, RB_ARCH_ALIGNMENT); 2960 2961 /* 2962 * In case the time delta is larger than the 27 bits for it 2963 * in the header, we need to add a timestamp. If another 2964 * event comes in when trying to discard this one to increase 2965 * the length, then the timestamp will be added in the allocated 2966 * space of this event. If length is bigger than the size needed 2967 * for the TIME_EXTEND, then padding has to be used. The events 2968 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2969 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2970 * As length is a multiple of 4, we only need to worry if it 2971 * is 12 (RB_LEN_TIME_EXTEND + 4). 
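 *
 * Worked example (illustrative, 4-byte RB_ARCH_ALIGNMENT): a request
 * for 5 bytes of payload becomes
 *
 *	length = 5 + RB_EVNT_HDR_SIZE = 9  ->  ALIGN(9, 4) = 12
 *
 * which is exactly the forbidden size, so the check below pads it to
 * 16; a 1-byte request becomes 1 + 4 = 5 -> 8 and is left alone.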
2972 */ 2973 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 2974 length += RB_ALIGNMENT; 2975 2976 return length; 2977 } 2978 2979 static u64 rb_time_delta(struct ring_buffer_event *event) 2980 { 2981 switch (event->type_len) { 2982 case RINGBUF_TYPE_PADDING: 2983 return 0; 2984 2985 case RINGBUF_TYPE_TIME_EXTEND: 2986 return rb_event_time_stamp(event); 2987 2988 case RINGBUF_TYPE_TIME_STAMP: 2989 return 0; 2990 2991 case RINGBUF_TYPE_DATA: 2992 return event->time_delta; 2993 default: 2994 return 0; 2995 } 2996 } 2997 2998 static inline int 2999 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 3000 struct ring_buffer_event *event) 3001 { 3002 unsigned long new_index, old_index; 3003 struct buffer_page *bpage; 3004 unsigned long index; 3005 unsigned long addr; 3006 u64 write_stamp; 3007 u64 delta; 3008 3009 new_index = rb_event_index(event); 3010 old_index = new_index + rb_event_ts_length(event); 3011 addr = (unsigned long)event; 3012 addr &= PAGE_MASK; 3013 3014 bpage = READ_ONCE(cpu_buffer->tail_page); 3015 3016 delta = rb_time_delta(event); 3017 3018 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) 3019 return 0; 3020 3021 /* Make sure the write stamp is read before testing the location */ 3022 barrier(); 3023 3024 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 3025 unsigned long write_mask = 3026 local_read(&bpage->write) & ~RB_WRITE_MASK; 3027 unsigned long event_length = rb_event_length(event); 3028 3029 /* Something came in, can't discard */ 3030 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, 3031 write_stamp, write_stamp - delta)) 3032 return 0; 3033 3034 /* 3035 * It's possible that the event time delta is zero 3036 * (has the same time stamp as the previous event) 3037 * in which case write_stamp and before_stamp could 3038 * be the same. In such a case, force before_stamp 3039 * to be different than write_stamp. It doesn't 3040 * matter what it is, as long as its different. 3041 */ 3042 if (!delta) 3043 rb_time_set(&cpu_buffer->before_stamp, 0); 3044 3045 /* 3046 * If an event were to come in now, it would see that the 3047 * write_stamp and the before_stamp are different, and assume 3048 * that this event just added itself before updating 3049 * the write stamp. The interrupting event will fix the 3050 * write stamp for us, and use the before stamp as its delta. 3051 */ 3052 3053 /* 3054 * This is on the tail page. It is possible that 3055 * a write could come in and move the tail page 3056 * and write to the next page. That is fine 3057 * because we just shorten what is on this page. 3058 */ 3059 old_index += write_mask; 3060 new_index += write_mask; 3061 index = local_cmpxchg(&bpage->write, old_index, new_index); 3062 if (index == old_index) { 3063 /* update counters */ 3064 local_sub(event_length, &cpu_buffer->entries_bytes); 3065 return 1; 3066 } 3067 } 3068 3069 /* could not discard */ 3070 return 0; 3071 } 3072 3073 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3074 { 3075 local_inc(&cpu_buffer->committing); 3076 local_inc(&cpu_buffer->commits); 3077 } 3078 3079 static __always_inline void 3080 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3081 { 3082 unsigned long max_count; 3083 3084 /* 3085 * We only race with interrupts and NMIs on this CPU. 3086 * If we own the commit event, then we can commit 3087 * all others that interrupted us, since the interruptions 3088 * are in stack format (they finish before they come 3089 * back to us). 
This allows us to do a simple loop to 3090 * assign the commit to the tail. 3091 */ 3092 again: 3093 max_count = cpu_buffer->nr_pages * 100; 3094 3095 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3096 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3097 return; 3098 if (RB_WARN_ON(cpu_buffer, 3099 rb_is_reader_page(cpu_buffer->tail_page))) 3100 return; 3101 /* 3102 * No need for a memory barrier here, as the update 3103 * of the tail_page did it for this page. 3104 */ 3105 local_set(&cpu_buffer->commit_page->page->commit, 3106 rb_page_write(cpu_buffer->commit_page)); 3107 rb_inc_page(&cpu_buffer->commit_page); 3108 /* add barrier to keep gcc from optimizing too much */ 3109 barrier(); 3110 } 3111 while (rb_commit_index(cpu_buffer) != 3112 rb_page_write(cpu_buffer->commit_page)) { 3113 3114 /* Make sure the readers see the content of what is committed. */ 3115 smp_wmb(); 3116 local_set(&cpu_buffer->commit_page->page->commit, 3117 rb_page_write(cpu_buffer->commit_page)); 3118 RB_WARN_ON(cpu_buffer, 3119 local_read(&cpu_buffer->commit_page->page->commit) & 3120 ~RB_WRITE_MASK); 3121 barrier(); 3122 } 3123 3124 /* again, keep gcc from optimizing */ 3125 barrier(); 3126 3127 /* 3128 * If an interrupt came in just after the first while loop 3129 * and pushed the tail page forward, we will be left with 3130 * a dangling commit that will never go forward. 3131 */ 3132 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3133 goto again; 3134 } 3135 3136 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3137 { 3138 unsigned long commits; 3139 3140 if (RB_WARN_ON(cpu_buffer, 3141 !local_read(&cpu_buffer->committing))) 3142 return; 3143 3144 again: 3145 commits = local_read(&cpu_buffer->commits); 3146 /* synchronize with interrupts */ 3147 barrier(); 3148 if (local_read(&cpu_buffer->committing) == 1) 3149 rb_set_commit_to_write(cpu_buffer); 3150 3151 local_dec(&cpu_buffer->committing); 3152 3153 /* synchronize with interrupts */ 3154 barrier(); 3155 3156 /* 3157 * Need to account for interrupts coming in between the 3158 * updating of the commit page and the clearing of the 3159 * committing counter. 
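 * An interleaving that needs the re-check (illustrative):
 *
 *	committing == 1, commits == N		this context
 *	rb_set_commit_to_write() done
 *	  -> interrupt reserves and commits	commits becomes N + 1; its
 *	     an event here			commit is deferred to us
 *	local_dec(&committing)			committing == 0
 *
 * The test below sees commits != N with committing == 0, re-enters,
 * and pushes the commit page forward for the interrupt's event.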
3160 */ 3161 if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3162 !local_read(&cpu_buffer->committing)) { 3163 local_inc(&cpu_buffer->committing); 3164 goto again; 3165 } 3166 } 3167 3168 static inline void rb_event_discard(struct ring_buffer_event *event) 3169 { 3170 if (extended_time(event)) 3171 event = skip_time_extend(event); 3172 3173 /* array[0] holds the actual length for the discarded event */ 3174 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3175 event->type_len = RINGBUF_TYPE_PADDING; 3176 /* time delta must be non zero */ 3177 if (!event->time_delta) 3178 event->time_delta = 1; 3179 } 3180 3181 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3182 { 3183 local_inc(&cpu_buffer->entries); 3184 rb_end_commit(cpu_buffer); 3185 } 3186 3187 static __always_inline void 3188 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3189 { 3190 if (buffer->irq_work.waiters_pending) { 3191 buffer->irq_work.waiters_pending = false; 3192 /* irq_work_queue() supplies its own memory barriers */ 3193 irq_work_queue(&buffer->irq_work.work); 3194 } 3195 3196 if (cpu_buffer->irq_work.waiters_pending) { 3197 cpu_buffer->irq_work.waiters_pending = false; 3198 /* irq_work_queue() supplies its own memory barriers */ 3199 irq_work_queue(&cpu_buffer->irq_work.work); 3200 } 3201 3202 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 3203 return; 3204 3205 if (cpu_buffer->reader_page == cpu_buffer->commit_page) 3206 return; 3207 3208 if (!cpu_buffer->irq_work.full_waiters_pending) 3209 return; 3210 3211 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 3212 3213 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 3214 return; 3215 3216 cpu_buffer->irq_work.wakeup_full = true; 3217 cpu_buffer->irq_work.full_waiters_pending = false; 3218 /* irq_work_queue() supplies its own memory barriers */ 3219 irq_work_queue(&cpu_buffer->irq_work.work); 3220 } 3221 3222 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 3223 # define do_ring_buffer_record_recursion() \ 3224 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 3225 #else 3226 # define do_ring_buffer_record_recursion() do { } while (0) 3227 #endif 3228 3229 /* 3230 * The lock and unlock are done within a preempt disable section. 3231 * The current_context per_cpu variable can only be modified 3232 * by the current task between lock and unlock. But it can 3233 * be modified more than once via an interrupt. To pass this 3234 * information from the lock to the unlock without having to 3235 * access the 'in_interrupt()' functions again (which do show 3236 * a bit of overhead in something as critical as function tracing), 3237 * we use a bitmask trick. 3238 * 3239 * bit 1 = NMI context 3240 * bit 2 = IRQ context 3241 * bit 3 = SoftIRQ context 3242 * bit 4 = normal context. 3243 * 3244 * This works because this is the order of contexts that can 3245 * preempt other contexts. A SoftIRQ never preempts an IRQ 3246 * context. 3247 * 3248 * When the context is determined, the corresponding bit is 3249 * checked and set (if it was set, then a recursion of that context 3250 * happened). 3251 * 3252 * On unlock, we need to clear this bit. To do so, just subtract 3253 * 1 from the current_context and AND it to itself.
3254 * 3255 * (binary) 3256 * 101 - 1 = 100 3257 * 101 & 100 = 100 (clearing bit zero) 3258 * 3259 * 1010 - 1 = 1001 3260 * 1010 & 1001 = 1000 (clearing bit 1) 3261 * 3262 * The least significant bit can be cleared this way, and it 3263 * just so happens that it is the same bit corresponding to 3264 * the current context. 3265 * 3266 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3267 * is set when a recursion is detected at the current context, and if 3268 * the TRANSITION bit is already set, it will fail the recursion. 3269 * This is needed because there's a lag between the changing of 3270 * interrupt context and updating the preempt count. In this case, 3271 * a false positive will be found. To handle this, one extra recursion 3272 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3273 * bit is already set, then it is considered a recursion and the function 3274 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3275 * 3276 * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3277 * to be cleared. Even if it wasn't the context that set it. That is, 3278 * if an interrupt comes in while NORMAL bit is set and the ring buffer 3279 * is called before preempt_count() is updated, since the check will 3280 * be on the NORMAL bit, the TRANSITION bit will then be set. If an 3281 * NMI then comes in, it will set the NMI bit, but when the NMI code 3282 * does the trace_recursive_unlock() it will clear the TRANSITION bit 3283 * and leave the NMI bit set. But this is fine, because the interrupt 3284 * code that set the TRANSITION bit will then clear the NMI bit when it 3285 * calls trace_recursive_unlock(). If another NMI comes in, it will 3286 * set the TRANSITION bit and continue. 3287 * 3288 * Note: The TRANSITION bit only handles a single transition between context. 3289 */ 3290 3291 static __always_inline int 3292 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3293 { 3294 unsigned int val = cpu_buffer->current_context; 3295 int bit = interrupt_context_level(); 3296 3297 bit = RB_CTX_NORMAL - bit; 3298 3299 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3300 /* 3301 * It is possible that this was called by transitioning 3302 * between interrupt context, and preempt_count() has not 3303 * been updated yet. In this case, use the TRANSITION bit. 3304 */ 3305 bit = RB_CTX_TRANSITION; 3306 if (val & (1 << (bit + cpu_buffer->nest))) { 3307 do_ring_buffer_record_recursion(); 3308 return 1; 3309 } 3310 } 3311 3312 val |= (1 << (bit + cpu_buffer->nest)); 3313 cpu_buffer->current_context = val; 3314 3315 return 0; 3316 } 3317 3318 static __always_inline void 3319 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3320 { 3321 cpu_buffer->current_context &= 3322 cpu_buffer->current_context - (1 << cpu_buffer->nest); 3323 } 3324 3325 /* The recursive locking above uses 5 bits */ 3326 #define NESTED_BITS 5 3327 3328 /** 3329 * ring_buffer_nest_start - Allow to trace while nested 3330 * @buffer: The ring buffer to modify 3331 * 3332 * The ring buffer has a safety mechanism to prevent recursion. 3333 * But there may be a case where a trace needs to be done while 3334 * tracing something else. In this case, calling this function 3335 * will allow this function to nest within a currently active 3336 * ring_buffer_lock_reserve(). 3337 * 3338 * Call this function before calling another ring_buffer_lock_reserve() and 3339 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 
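 *
 * Sketch of the intended call shape (error handling elided):
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), data, len);
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);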
3340 */ 3341 void ring_buffer_nest_start(struct trace_buffer *buffer) 3342 { 3343 struct ring_buffer_per_cpu *cpu_buffer; 3344 int cpu; 3345 3346 /* Enabled by ring_buffer_nest_end() */ 3347 preempt_disable_notrace(); 3348 cpu = raw_smp_processor_id(); 3349 cpu_buffer = buffer->buffers[cpu]; 3350 /* This is the shift value for the above recursive locking */ 3351 cpu_buffer->nest += NESTED_BITS; 3352 } 3353 3354 /** 3355 * ring_buffer_nest_end - Allow to trace while nested 3356 * @buffer: The ring buffer to modify 3357 * 3358 * Must be called after ring_buffer_nest_start() and after the 3359 * ring_buffer_unlock_commit(). 3360 */ 3361 void ring_buffer_nest_end(struct trace_buffer *buffer) 3362 { 3363 struct ring_buffer_per_cpu *cpu_buffer; 3364 int cpu; 3365 3366 /* disabled by ring_buffer_nest_start() */ 3367 cpu = raw_smp_processor_id(); 3368 cpu_buffer = buffer->buffers[cpu]; 3369 /* This is the shift value for the above recursive locking */ 3370 cpu_buffer->nest -= NESTED_BITS; 3371 preempt_enable_notrace(); 3372 } 3373 3374 /** 3375 * ring_buffer_unlock_commit - commit a reserved event 3376 * @buffer: The buffer to commit to 3377 * 3378 * This commits the event last reserved on this CPU to the ring buffer, 3379 * and releases any locks held. 3380 * 3381 * Must be paired with ring_buffer_lock_reserve. 3382 */ 3383 int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3384 { 3385 struct ring_buffer_per_cpu *cpu_buffer; 3386 int cpu = raw_smp_processor_id(); 3387 3388 cpu_buffer = buffer->buffers[cpu]; 3389 3390 rb_commit(cpu_buffer); 3391 3392 rb_wakeups(buffer, cpu_buffer); 3393 3394 trace_recursive_unlock(cpu_buffer); 3395 3396 preempt_enable_notrace(); 3397 3398 return 0; 3399 } 3400 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3401 3402 /* Special value to validate all deltas on a page. */ 3403 #define CHECK_FULL_PAGE 1L 3404 3405 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 3406 static void dump_buffer_page(struct buffer_data_page *bpage, 3407 struct rb_event_info *info, 3408 unsigned long tail) 3409 { 3410 struct ring_buffer_event *event; 3411 u64 ts, delta; 3412 int e; 3413 3414 ts = bpage->time_stamp; 3415 pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 3416 3417 for (e = 0; e < tail; e += rb_event_length(event)) { 3418 3419 event = (struct ring_buffer_event *)(bpage->data + e); 3420 3421 switch (event->type_len) { 3422 3423 case RINGBUF_TYPE_TIME_EXTEND: 3424 delta = rb_event_time_stamp(event); 3425 ts += delta; 3426 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 3427 break; 3428 3429 case RINGBUF_TYPE_TIME_STAMP: 3430 delta = rb_event_time_stamp(event); 3431 ts = rb_fix_abs_ts(delta, ts); 3432 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 3433 break; 3434 3435 case RINGBUF_TYPE_PADDING: 3436 ts += event->time_delta; 3437 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 3438 break; 3439 3440 case RINGBUF_TYPE_DATA: 3441 ts += event->time_delta; 3442 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 3443 break; 3444 3445 default: 3446 break; 3447 } 3448 } 3449 } 3450 3451 static DEFINE_PER_CPU(atomic_t, checking); 3452 static atomic_t ts_dump; 3453 3454 /* 3455 * Check if the current event time stamp matches the deltas on 3456 * the buffer page.
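 * In outline, the walk below recomputes the event's time from the page
 * header and every preceding delta:
 *
 *	ts = bpage->time_stamp;
 *	for each event before tail:
 *		ts += delta (or ts = absolute stamp);
 *	expected: full ? ts <= info->ts : ts + info->delta == info->ts
 *
 * and only a disagreement of the accumulated time is reported.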
3457 */ 3458 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3459 struct rb_event_info *info, 3460 unsigned long tail) 3461 { 3462 struct ring_buffer_event *event; 3463 struct buffer_data_page *bpage; 3464 u64 ts, delta; 3465 bool full = false; 3466 int e; 3467 3468 bpage = info->tail_page->page; 3469 3470 if (tail == CHECK_FULL_PAGE) { 3471 full = true; 3472 tail = local_read(&bpage->commit); 3473 } else if (info->add_timestamp & 3474 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 3475 /* Ignore events with absolute time stamps */ 3476 return; 3477 } 3478 3479 /* 3480 * Do not check the first event (skip possible extends too). 3481 * Also do not check if previous events have not been committed. 3482 */ 3483 if (tail <= 8 || tail > local_read(&bpage->commit)) 3484 return; 3485 3486 /* 3487 * If this interrupted another event, do not check (one check per CPU at a time) 3488 */ 3489 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 3490 goto out; 3491 3492 ts = bpage->time_stamp; 3493 3494 for (e = 0; e < tail; e += rb_event_length(event)) { 3495 3496 event = (struct ring_buffer_event *)(bpage->data + e); 3497 3498 switch (event->type_len) { 3499 3500 case RINGBUF_TYPE_TIME_EXTEND: 3501 delta = rb_event_time_stamp(event); 3502 ts += delta; 3503 break; 3504 3505 case RINGBUF_TYPE_TIME_STAMP: 3506 delta = rb_event_time_stamp(event); 3507 ts = rb_fix_abs_ts(delta, ts); 3508 break; 3509 3510 case RINGBUF_TYPE_PADDING: 3511 if (event->time_delta == 1) 3512 break; 3513 fallthrough; 3514 case RINGBUF_TYPE_DATA: 3515 ts += event->time_delta; 3516 break; 3517 3518 default: 3519 RB_WARN_ON(cpu_buffer, 1); 3520 } 3521 } 3522 if ((full && ts > info->ts) || 3523 (!full && ts + info->delta != info->ts)) { 3524 /* If another report is happening, ignore this one */ 3525 if (atomic_inc_return(&ts_dump) != 1) { 3526 atomic_dec(&ts_dump); 3527 goto out; 3528 } 3529 atomic_inc(&cpu_buffer->record_disabled); 3530 /* There are some cases during boot up where this can happen */ 3531 WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 3532 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 3533 cpu_buffer->cpu, 3534 ts + info->delta, info->ts, info->delta, 3535 info->before, info->after, 3536 full ? " (full)" : ""); 3537 dump_buffer_page(bpage, info, tail); 3538 atomic_dec(&ts_dump); 3539 /* Do not re-enable checking */ 3540 return; 3541 } 3542 out: 3543 atomic_dec(this_cpu_ptr(&checking)); 3544 } 3545 #else 3546 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 3547 struct rb_event_info *info, 3548 unsigned long tail) 3549 { 3550 } 3551 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 3552 3553 static struct ring_buffer_event * 3554 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3555 struct rb_event_info *info) 3556 { 3557 struct ring_buffer_event *event; 3558 struct buffer_page *tail_page; 3559 unsigned long tail, write, w; 3560 bool a_ok; 3561 bool b_ok; 3562 3563 /* Don't let the compiler play games with cpu_buffer->tail_page */ 3564 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3565 3566 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3567 barrier(); 3568 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3569 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3570 barrier(); 3571 info->ts = rb_time_stamp(cpu_buffer->buffer); 3572 3573 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3574 info->delta = info->ts; 3575 } else { 3576 /* 3577 * If interrupting an event time update, we may need an 3578 * absolute timestamp. 3579 * Don't bother if this is the start of a new page (w == 0). 3580 */ 3581 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { 3582 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3583 info->length += RB_LEN_TIME_EXTEND; 3584 } else { 3585 info->delta = info->ts - info->after; 3586 if (unlikely(test_time_stamp(info->delta))) { 3587 info->add_timestamp |= RB_ADD_STAMP_EXTEND; 3588 info->length += RB_LEN_TIME_EXTEND; 3589 } 3590 } 3591 } 3592 3593 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3594 3595 /*C*/ write = local_add_return(info->length, &tail_page->write); 3596 3597 /* set write to only the index of the write */ 3598 write &= RB_WRITE_MASK; 3599 3600 tail = write - info->length; 3601 3602 /* See if we shot past the end of this buffer page */ 3603 if (unlikely(write > BUF_PAGE_SIZE)) { 3604 /* before and after may now be different, fix it up */ 3605 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 3606 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3607 if (a_ok && b_ok && info->before != info->after) 3608 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, 3609 info->before, info->after); 3610 if (a_ok && b_ok) 3611 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3612 return rb_move_tail(cpu_buffer, tail, info); 3613 } 3614 3615 if (likely(tail == w)) { 3616 u64 save_before; 3617 bool s_ok; 3618 3619 /* Nothing interrupted us between A and C */ 3620 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3621 barrier(); 3622 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); 3623 RB_WARN_ON(cpu_buffer, !s_ok); 3624 if (likely(!(info->add_timestamp & 3625 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3626 /* This did not interrupt any time update */ 3627 info->delta = info->ts - info->after; 3628 else 3629 /* Just use full timestamp for interrupting event */ 3630 info->delta = info->ts; 3631 barrier(); 3632 check_buffer(cpu_buffer, info, tail); 3633 if (unlikely(info->ts != save_before)) { 3634 /* SLOW PATH - Interrupted between C and E */ 3635 3636 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3637 RB_WARN_ON(cpu_buffer, !a_ok); 3638 3639 /* Write stamp must only go
forward */ 3640 if (save_before > info->after) { 3641 /* 3642 * We do not care about the result, only that 3643 * it gets updated atomically. 3644 */ 3645 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, 3646 info->after, save_before); 3647 } 3648 } 3649 } else { 3650 u64 ts; 3651 /* SLOW PATH - Interrupted between A and C */ 3652 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3653 /* Was interrupted before here, write_stamp must be valid */ 3654 RB_WARN_ON(cpu_buffer, !a_ok); 3655 ts = rb_time_stamp(cpu_buffer->buffer); 3656 barrier(); 3657 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 3658 info->after < ts && 3659 rb_time_cmpxchg(&cpu_buffer->write_stamp, 3660 info->after, ts)) { 3661 /* Nothing came after this event between C and E */ 3662 info->delta = ts - info->after; 3663 } else { 3664 /* 3665 * Interrupted between C and E: 3666 * Lost the previous event's time stamp. Just set the 3667 * delta to zero, and this will be the same time as 3668 * the event this event interrupted. And the events that 3669 * came after this will still be correct (as they would 3670 * have built their delta on the previous event). 3671 */ 3672 info->delta = 0; 3673 } 3674 info->ts = ts; 3675 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3676 } 3677 3678 /* 3679 * If this is the first commit on the page, then it has the same 3680 * timestamp as the page itself. 3681 */ 3682 if (unlikely(!tail && !(info->add_timestamp & 3683 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3684 info->delta = 0; 3685 3686 /* We reserved something on the buffer */ 3687 3688 event = __rb_page_index(tail_page, tail); 3689 rb_update_event(cpu_buffer, event, info); 3690 3691 local_inc(&tail_page->entries); 3692 3693 /* 3694 * If this is the first commit on the page, then update 3695 * its timestamp. 3696 */ 3697 if (unlikely(!tail)) 3698 tail_page->page->time_stamp = info->ts; 3699 3700 /* account for these added bytes */ 3701 local_add(info->length, &cpu_buffer->entries_bytes); 3702 3703 return event; 3704 } 3705 3706 static __always_inline struct ring_buffer_event * 3707 rb_reserve_next_event(struct trace_buffer *buffer, 3708 struct ring_buffer_per_cpu *cpu_buffer, 3709 unsigned long length) 3710 { 3711 struct ring_buffer_event *event; 3712 struct rb_event_info info; 3713 int nr_loops = 0; 3714 int add_ts_default; 3715 3716 rb_start_commit(cpu_buffer); 3717 /* The commit page can not change after this */ 3718 3719 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3720 /* 3721 * Due to the ability to swap a cpu buffer from a buffer 3722 * it is possible it was swapped before we committed. 3723 * (committing stops a swap). We check for it here and 3724 * if it happened, we have to fail the write. 3725 */ 3726 barrier(); 3727 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 3728 local_dec(&cpu_buffer->committing); 3729 local_dec(&cpu_buffer->commits); 3730 return NULL; 3731 } 3732 #endif 3733 3734 info.length = rb_calculate_event_length(length); 3735 3736 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 3737 add_ts_default = RB_ADD_STAMP_ABSOLUTE; 3738 info.length += RB_LEN_TIME_EXTEND; 3739 } else { 3740 add_ts_default = RB_ADD_STAMP_NONE; 3741 } 3742 3743 again: 3744 info.add_timestamp = add_ts_default; 3745 info.delta = 0; 3746 3747 /* 3748 * We allow for interrupts to reenter here and do a trace. 3749 * If one does, it will cause this original code to loop 3750 * back here. Even with heavy interrupts happening, this 3751 * should only happen a few times in a row.
If this happens 3752 * 1000 times in a row, there must be either an interrupt 3753 * storm or we have something buggy. 3754 * Bail! 3755 */ 3756 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3757 goto out_fail; 3758 3759 event = __rb_reserve_next(cpu_buffer, &info); 3760 3761 if (unlikely(PTR_ERR(event) == -EAGAIN)) { 3762 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3763 info.length -= RB_LEN_TIME_EXTEND; 3764 goto again; 3765 } 3766 3767 if (likely(event)) 3768 return event; 3769 out_fail: 3770 rb_end_commit(cpu_buffer); 3771 return NULL; 3772 } 3773 3774 /** 3775 * ring_buffer_lock_reserve - reserve a part of the buffer 3776 * @buffer: the ring buffer to reserve from 3777 * @length: the length of the data to reserve (excluding event header) 3778 * 3779 * Returns a reserved event on the ring buffer to copy directly to. 3780 * The user of this interface will need to get the body to write into 3781 * and can use the ring_buffer_event_data() interface. 3782 * 3783 * The length is the length of the data needed, not the event length 3784 * which also includes the event header. 3785 * 3786 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 3787 * If NULL is returned, then nothing has been allocated or locked. 3788 */ 3789 struct ring_buffer_event * 3790 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 3791 { 3792 struct ring_buffer_per_cpu *cpu_buffer; 3793 struct ring_buffer_event *event; 3794 int cpu; 3795 3796 /* If we are tracing schedule, we don't want to recurse */ 3797 preempt_disable_notrace(); 3798 3799 if (unlikely(atomic_read(&buffer->record_disabled))) 3800 goto out; 3801 3802 cpu = raw_smp_processor_id(); 3803 3804 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3805 goto out; 3806 3807 cpu_buffer = buffer->buffers[cpu]; 3808 3809 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3810 goto out; 3811 3812 if (unlikely(length > BUF_MAX_DATA_SIZE)) 3813 goto out; 3814 3815 if (unlikely(trace_recursive_lock(cpu_buffer))) 3816 goto out; 3817 3818 event = rb_reserve_next_event(buffer, cpu_buffer, length); 3819 if (!event) 3820 goto out_unlock; 3821 3822 return event; 3823 3824 out_unlock: 3825 trace_recursive_unlock(cpu_buffer); 3826 out: 3827 preempt_enable_notrace(); 3828 return NULL; 3829 } 3830 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 3831 3832 /* 3833 * Decrement the entries to the page that an event is on. 3834 * The event does not even need to exist, only the pointer 3835 * to the page it is on. This may only be called before the commit 3836 * takes place. 3837 */ 3838 static inline void 3839 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3840 struct ring_buffer_event *event) 3841 { 3842 unsigned long addr = (unsigned long)event; 3843 struct buffer_page *bpage = cpu_buffer->commit_page; 3844 struct buffer_page *start; 3845 3846 addr &= PAGE_MASK; 3847 3848 /* Do the likely case first */ 3849 if (likely(bpage->page == (void *)addr)) { 3850 local_dec(&bpage->entries); 3851 return; 3852 } 3853 3854 /* 3855 * Because the commit page may be on the reader page we 3856 * start with the next page and check the end loop there. 3857 */ 3858 rb_inc_page(&bpage); 3859 start = bpage; 3860 do { 3861 if (bpage->page == (void *)addr) { 3862 local_dec(&bpage->entries); 3863 return; 3864 } 3865 rb_inc_page(&bpage); 3866 } while (bpage != start); 3867 3868 /* commit not part of this buffer?? 
*/ 3869 RB_WARN_ON(cpu_buffer, 1); 3870 } 3871 3872 /** 3873 * ring_buffer_discard_commit - discard an event that has not been committed 3874 * @buffer: the ring buffer 3875 * @event: non committed event to discard 3876 * 3877 * Sometimes an event that is in the ring buffer needs to be ignored. 3878 * This function lets the user discard an event in the ring buffer 3879 * and then that event will not be read later. 3880 * 3881 * This function only works if it is called before the item has been 3882 * committed. It will try to free the event from the ring buffer 3883 * if another event has not been added behind it. 3884 * 3885 * If another event has been added behind it, it will set the event 3886 * up as discarded, and perform the commit. 3887 * 3888 * If this function is called, do not call ring_buffer_unlock_commit on 3889 * the event. 3890 */ 3891 void ring_buffer_discard_commit(struct trace_buffer *buffer, 3892 struct ring_buffer_event *event) 3893 { 3894 struct ring_buffer_per_cpu *cpu_buffer; 3895 int cpu; 3896 3897 /* The event is discarded regardless */ 3898 rb_event_discard(event); 3899 3900 cpu = smp_processor_id(); 3901 cpu_buffer = buffer->buffers[cpu]; 3902 3903 /* 3904 * This must only be called if the event has not been 3905 * committed yet. Thus we can assume that preemption 3906 * is still disabled. 3907 */ 3908 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3909 3910 rb_decrement_entry(cpu_buffer, event); 3911 if (rb_try_to_discard(cpu_buffer, event)) 3912 goto out; 3913 3914 out: 3915 rb_end_commit(cpu_buffer); 3916 3917 trace_recursive_unlock(cpu_buffer); 3918 3919 preempt_enable_notrace(); 3920 3921 } 3922 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3923 3924 /** 3925 * ring_buffer_write - write data to the buffer without reserving 3926 * @buffer: The ring buffer to write to. 3927 * @length: The length of the data being written (excluding the event header) 3928 * @data: The data to write to the buffer. 3929 * 3930 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 3931 * one function. If you already have the data to write to the buffer, it 3932 * may be easier to simply call this function. 3933 * 3934 * Note, like ring_buffer_lock_reserve, the length is the length of the data 3935 * and not the length of the event which would hold the header. 
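 *
 * A minimal usage sketch (illustrative only; the entry layout and the
 * error handling are assumptions, not part of this API):
 *
 *	struct { int pid; u64 stamp; } entry = { 0, 0 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		pr_debug("ring buffer write failed\n");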
3936  */
3937 int ring_buffer_write(struct trace_buffer *buffer,
3938 		      unsigned long length,
3939 		      void *data)
3940 {
3941 	struct ring_buffer_per_cpu *cpu_buffer;
3942 	struct ring_buffer_event *event;
3943 	void *body;
3944 	int ret = -EBUSY;
3945 	int cpu;
3946 
3947 	preempt_disable_notrace();
3948 
3949 	if (atomic_read(&buffer->record_disabled))
3950 		goto out;
3951 
3952 	cpu = raw_smp_processor_id();
3953 
3954 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3955 		goto out;
3956 
3957 	cpu_buffer = buffer->buffers[cpu];
3958 
3959 	if (atomic_read(&cpu_buffer->record_disabled))
3960 		goto out;
3961 
3962 	if (length > BUF_MAX_DATA_SIZE)
3963 		goto out;
3964 
3965 	if (unlikely(trace_recursive_lock(cpu_buffer)))
3966 		goto out;
3967 
3968 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3969 	if (!event)
3970 		goto out_unlock;
3971 
3972 	body = rb_event_data(event);
3973 
3974 	memcpy(body, data, length);
3975 
3976 	rb_commit(cpu_buffer);
3977 
3978 	rb_wakeups(buffer, cpu_buffer);
3979 
3980 	ret = 0;
3981 
3982  out_unlock:
3983 	trace_recursive_unlock(cpu_buffer);
3984 
3985  out:
3986 	preempt_enable_notrace();
3987 
3988 	return ret;
3989 }
3990 EXPORT_SYMBOL_GPL(ring_buffer_write);
3991 
3992 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3993 {
3994 	struct buffer_page *reader = cpu_buffer->reader_page;
3995 	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3996 	struct buffer_page *commit = cpu_buffer->commit_page;
3997 
3998 	/* In case of error, head will be NULL */
3999 	if (unlikely(!head))
4000 		return true;
4001 
4002 	/* Reader should exhaust content in reader page */
4003 	if (reader->read != rb_page_commit(reader))
4004 		return false;
4005 
4006 	/*
4007 	 * If writers are committing on the reader page, knowing all
4008 	 * committed content has been read, the ring buffer is empty.
4009 	 */
4010 	if (commit == reader)
4011 		return true;
4012 
4013 	/*
4014 	 * If writers are committing on a page other than reader page
4015 	 * and head page, there should always be content to read.
4016 	 */
4017 	if (commit != head)
4018 		return false;
4019 
4020 	/*
4021 	 * Writers are committing on the head page; we just need to
4022 	 * check whether any data has been committed there. The reader
4023 	 * will swap the reader page with the head page when it needs to read.
4024 	 */
4025 	return rb_page_commit(commit) == 0;
4026 }
4027 
4028 /**
4029  * ring_buffer_record_disable - stop all writes into the buffer
4030  * @buffer: The ring buffer to stop writes to.
4031  *
4032  * This prevents all writes to the buffer. Any attempt to write
4033  * to the buffer after this will fail and return NULL.
4034  *
4035  * The caller should call synchronize_rcu() after this.
4036  */
4037 void ring_buffer_record_disable(struct trace_buffer *buffer)
4038 {
4039 	atomic_inc(&buffer->record_disabled);
4040 }
4041 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
4042 
4043 /**
4044  * ring_buffer_record_enable - enable writes to the buffer
4045  * @buffer: The ring buffer to enable writes
4046  *
4047  * Note, multiple disables will need the same number of enables
4048  * to truly enable the writing (much like preempt_disable).
4049  */
4050 void ring_buffer_record_enable(struct trace_buffer *buffer)
4051 {
4052 	atomic_dec(&buffer->record_disabled);
4053 }
4054 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4055 
4056 /**
4057  * ring_buffer_record_off - stop all writes into the buffer
4058  * @buffer: The ring buffer to stop writes to.
4059  *
4060  * This prevents all writes to the buffer. Any attempt to write
4061  * to the buffer after this will fail and return NULL.
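 *
 * For example (an illustrative sketch): the off state is a single
 * flag bit rather than a count, so after
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *
 * a single ring_buffer_record_on() is enough to accept writes again
 * (absent any counted disables).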
4062  *
4063  * This is different from ring_buffer_record_disable() as
4064  * it works like an on/off switch, whereas the disable() version
4065  * must be paired with an enable().
4066  */
4067 void ring_buffer_record_off(struct trace_buffer *buffer)
4068 {
4069 	unsigned int rd;
4070 	unsigned int new_rd;
4071 
4072 	do {
4073 		rd = atomic_read(&buffer->record_disabled);
4074 		new_rd = rd | RB_BUFFER_OFF;
4075 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4076 }
4077 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4078 
4079 /**
4080  * ring_buffer_record_on - restart writes into the buffer
4081  * @buffer: The ring buffer to start writes to.
4082  *
4083  * This enables all writes to the buffer that were disabled by
4084  * ring_buffer_record_off().
4085  *
4086  * This is different from ring_buffer_record_enable() as
4087  * it works like an on/off switch, whereas the enable() version
4088  * must be paired with a disable().
4089  */
4090 void ring_buffer_record_on(struct trace_buffer *buffer)
4091 {
4092 	unsigned int rd;
4093 	unsigned int new_rd;
4094 
4095 	do {
4096 		rd = atomic_read(&buffer->record_disabled);
4097 		new_rd = rd & ~RB_BUFFER_OFF;
4098 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4099 }
4100 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4101 
4102 /**
4103  * ring_buffer_record_is_on - return true if the ring buffer can write
4104  * @buffer: The ring buffer to see if write is enabled
4105  *
4106  * Returns true if the ring buffer is in a state in which it accepts writes.
4107  */
4108 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4109 {
4110 	return !atomic_read(&buffer->record_disabled);
4111 }
4112 
4113 /**
4114  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4115  * @buffer: The ring buffer to see if write is set enabled
4116  *
4117  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4118  * Note that this does NOT mean it is in a writable state.
4119  *
4120  * It may return true when the ring buffer has been disabled by
4121  * ring_buffer_record_disable(), as that is a temporary disabling of
4122  * the ring buffer.
4123  */
4124 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4125 {
4126 	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4127 }
4128 
4129 /**
4130  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4131  * @buffer: The ring buffer to stop writes to.
4132  * @cpu: The CPU buffer to stop
4133  *
4134  * This prevents all writes to the buffer. Any attempt to write
4135  * to the buffer after this will fail and return NULL.
4136  *
4137  * The caller should call synchronize_rcu() after this.
4138  */
4139 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4140 {
4141 	struct ring_buffer_per_cpu *cpu_buffer;
4142 
4143 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4144 		return;
4145 
4146 	cpu_buffer = buffer->buffers[cpu];
4147 	atomic_inc(&cpu_buffer->record_disabled);
4148 }
4149 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4150 
4151 /**
4152  * ring_buffer_record_enable_cpu - enable writes to the buffer
4153  * @buffer: The ring buffer to enable writes
4154  * @cpu: The CPU to enable.
4155  *
4156  * Note, multiple disables will need the same number of enables
4157  * to truly enable the writing (much like preempt_disable).
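 *
 * For example (illustrative): the disable count nests, so
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	ring_buffer_record_enable_cpu(buffer, cpu);
 *
 * still leaves that CPU buffer disabled; a second enable call is
 * needed before writes to it can succeed again.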
4158 */ 4159 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 4160 { 4161 struct ring_buffer_per_cpu *cpu_buffer; 4162 4163 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4164 return; 4165 4166 cpu_buffer = buffer->buffers[cpu]; 4167 atomic_dec(&cpu_buffer->record_disabled); 4168 } 4169 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 4170 4171 /* 4172 * The total entries in the ring buffer is the running counter 4173 * of entries entered into the ring buffer, minus the sum of 4174 * the entries read from the ring buffer and the number of 4175 * entries that were overwritten. 4176 */ 4177 static inline unsigned long 4178 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4179 { 4180 return local_read(&cpu_buffer->entries) - 4181 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4182 } 4183 4184 /** 4185 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4186 * @buffer: The ring buffer 4187 * @cpu: The per CPU buffer to read from. 4188 */ 4189 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4190 { 4191 unsigned long flags; 4192 struct ring_buffer_per_cpu *cpu_buffer; 4193 struct buffer_page *bpage; 4194 u64 ret = 0; 4195 4196 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4197 return 0; 4198 4199 cpu_buffer = buffer->buffers[cpu]; 4200 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4201 /* 4202 * if the tail is on reader_page, oldest time stamp is on the reader 4203 * page 4204 */ 4205 if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4206 bpage = cpu_buffer->reader_page; 4207 else 4208 bpage = rb_set_head_page(cpu_buffer); 4209 if (bpage) 4210 ret = bpage->page->time_stamp; 4211 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4212 4213 return ret; 4214 } 4215 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4216 4217 /** 4218 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4219 * @buffer: The ring buffer 4220 * @cpu: The per CPU buffer to read from. 4221 */ 4222 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4223 { 4224 struct ring_buffer_per_cpu *cpu_buffer; 4225 unsigned long ret; 4226 4227 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4228 return 0; 4229 4230 cpu_buffer = buffer->buffers[cpu]; 4231 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4232 4233 return ret; 4234 } 4235 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4236 4237 /** 4238 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 4239 * @buffer: The ring buffer 4240 * @cpu: The per CPU buffer to get the entries from. 4241 */ 4242 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 4243 { 4244 struct ring_buffer_per_cpu *cpu_buffer; 4245 4246 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4247 return 0; 4248 4249 cpu_buffer = buffer->buffers[cpu]; 4250 4251 return rb_num_of_entries(cpu_buffer); 4252 } 4253 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 4254 4255 /** 4256 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4257 * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
4258 * @buffer: The ring buffer 4259 * @cpu: The per CPU buffer to get the number of overruns from 4260 */ 4261 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 4262 { 4263 struct ring_buffer_per_cpu *cpu_buffer; 4264 unsigned long ret; 4265 4266 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4267 return 0; 4268 4269 cpu_buffer = buffer->buffers[cpu]; 4270 ret = local_read(&cpu_buffer->overrun); 4271 4272 return ret; 4273 } 4274 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 4275 4276 /** 4277 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4278 * commits failing due to the buffer wrapping around while there are uncommitted 4279 * events, such as during an interrupt storm. 4280 * @buffer: The ring buffer 4281 * @cpu: The per CPU buffer to get the number of overruns from 4282 */ 4283 unsigned long 4284 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4285 { 4286 struct ring_buffer_per_cpu *cpu_buffer; 4287 unsigned long ret; 4288 4289 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4290 return 0; 4291 4292 cpu_buffer = buffer->buffers[cpu]; 4293 ret = local_read(&cpu_buffer->commit_overrun); 4294 4295 return ret; 4296 } 4297 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4298 4299 /** 4300 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4301 * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4302 * @buffer: The ring buffer 4303 * @cpu: The per CPU buffer to get the number of overruns from 4304 */ 4305 unsigned long 4306 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4307 { 4308 struct ring_buffer_per_cpu *cpu_buffer; 4309 unsigned long ret; 4310 4311 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4312 return 0; 4313 4314 cpu_buffer = buffer->buffers[cpu]; 4315 ret = local_read(&cpu_buffer->dropped_events); 4316 4317 return ret; 4318 } 4319 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4320 4321 /** 4322 * ring_buffer_read_events_cpu - get the number of events successfully read 4323 * @buffer: The ring buffer 4324 * @cpu: The per CPU buffer to get the number of events read 4325 */ 4326 unsigned long 4327 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4328 { 4329 struct ring_buffer_per_cpu *cpu_buffer; 4330 4331 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4332 return 0; 4333 4334 cpu_buffer = buffer->buffers[cpu]; 4335 return cpu_buffer->read; 4336 } 4337 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4338 4339 /** 4340 * ring_buffer_entries - get the number of entries in a buffer 4341 * @buffer: The ring buffer 4342 * 4343 * Returns the total number of entries in the ring buffer 4344 * (all CPU entries) 4345 */ 4346 unsigned long ring_buffer_entries(struct trace_buffer *buffer) 4347 { 4348 struct ring_buffer_per_cpu *cpu_buffer; 4349 unsigned long entries = 0; 4350 int cpu; 4351 4352 /* if you care about this being correct, lock the buffer */ 4353 for_each_buffer_cpu(buffer, cpu) { 4354 cpu_buffer = buffer->buffers[cpu]; 4355 entries += rb_num_of_entries(cpu_buffer); 4356 } 4357 4358 return entries; 4359 } 4360 EXPORT_SYMBOL_GPL(ring_buffer_entries); 4361 4362 /** 4363 * ring_buffer_overruns - get the number of overruns in buffer 4364 * @buffer: The ring buffer 4365 * 4366 * Returns the total number of overruns in the ring buffer 4367 * (all CPU entries) 4368 */ 4369 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 4370 { 4371 struct ring_buffer_per_cpu *cpu_buffer; 4372 unsigned long overruns = 0; 4373 int cpu; 4374 4375 /* 
if you care about this being correct, lock the buffer */ 4376 for_each_buffer_cpu(buffer, cpu) { 4377 cpu_buffer = buffer->buffers[cpu]; 4378 overruns += local_read(&cpu_buffer->overrun); 4379 } 4380 4381 return overruns; 4382 } 4383 EXPORT_SYMBOL_GPL(ring_buffer_overruns); 4384 4385 static void rb_iter_reset(struct ring_buffer_iter *iter) 4386 { 4387 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4388 4389 /* Iterator usage is expected to have record disabled */ 4390 iter->head_page = cpu_buffer->reader_page; 4391 iter->head = cpu_buffer->reader_page->read; 4392 iter->next_event = iter->head; 4393 4394 iter->cache_reader_page = iter->head_page; 4395 iter->cache_read = cpu_buffer->read; 4396 4397 if (iter->head) { 4398 iter->read_stamp = cpu_buffer->read_stamp; 4399 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 4400 } else { 4401 iter->read_stamp = iter->head_page->page->time_stamp; 4402 iter->page_stamp = iter->read_stamp; 4403 } 4404 } 4405 4406 /** 4407 * ring_buffer_iter_reset - reset an iterator 4408 * @iter: The iterator to reset 4409 * 4410 * Resets the iterator, so that it will start from the beginning 4411 * again. 4412 */ 4413 void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4414 { 4415 struct ring_buffer_per_cpu *cpu_buffer; 4416 unsigned long flags; 4417 4418 if (!iter) 4419 return; 4420 4421 cpu_buffer = iter->cpu_buffer; 4422 4423 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4424 rb_iter_reset(iter); 4425 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4426 } 4427 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 4428 4429 /** 4430 * ring_buffer_iter_empty - check if an iterator has no more to read 4431 * @iter: The iterator to check 4432 */ 4433 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 4434 { 4435 struct ring_buffer_per_cpu *cpu_buffer; 4436 struct buffer_page *reader; 4437 struct buffer_page *head_page; 4438 struct buffer_page *commit_page; 4439 struct buffer_page *curr_commit_page; 4440 unsigned commit; 4441 u64 curr_commit_ts; 4442 u64 commit_ts; 4443 4444 cpu_buffer = iter->cpu_buffer; 4445 reader = cpu_buffer->reader_page; 4446 head_page = cpu_buffer->head_page; 4447 commit_page = cpu_buffer->commit_page; 4448 commit_ts = commit_page->page->time_stamp; 4449 4450 /* 4451 * When the writer goes across pages, it issues a cmpxchg which 4452 * is a mb(), which will synchronize with the rmb here. 
4453 * (see rb_tail_page_update()) 4454 */ 4455 smp_rmb(); 4456 commit = rb_page_commit(commit_page); 4457 /* We want to make sure that the commit page doesn't change */ 4458 smp_rmb(); 4459 4460 /* Make sure commit page didn't change */ 4461 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4462 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4463 4464 /* If the commit page changed, then there's more data */ 4465 if (curr_commit_page != commit_page || 4466 curr_commit_ts != commit_ts) 4467 return 0; 4468 4469 /* Still racy, as it may return a false positive, but that's OK */ 4470 return ((iter->head_page == commit_page && iter->head >= commit) || 4471 (iter->head_page == reader && commit_page == head_page && 4472 head_page->read == commit && 4473 iter->head == rb_page_commit(cpu_buffer->reader_page))); 4474 } 4475 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 4476 4477 static void 4478 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 4479 struct ring_buffer_event *event) 4480 { 4481 u64 delta; 4482 4483 switch (event->type_len) { 4484 case RINGBUF_TYPE_PADDING: 4485 return; 4486 4487 case RINGBUF_TYPE_TIME_EXTEND: 4488 delta = rb_event_time_stamp(event); 4489 cpu_buffer->read_stamp += delta; 4490 return; 4491 4492 case RINGBUF_TYPE_TIME_STAMP: 4493 delta = rb_event_time_stamp(event); 4494 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4495 cpu_buffer->read_stamp = delta; 4496 return; 4497 4498 case RINGBUF_TYPE_DATA: 4499 cpu_buffer->read_stamp += event->time_delta; 4500 return; 4501 4502 default: 4503 RB_WARN_ON(cpu_buffer, 1); 4504 } 4505 return; 4506 } 4507 4508 static void 4509 rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 4510 struct ring_buffer_event *event) 4511 { 4512 u64 delta; 4513 4514 switch (event->type_len) { 4515 case RINGBUF_TYPE_PADDING: 4516 return; 4517 4518 case RINGBUF_TYPE_TIME_EXTEND: 4519 delta = rb_event_time_stamp(event); 4520 iter->read_stamp += delta; 4521 return; 4522 4523 case RINGBUF_TYPE_TIME_STAMP: 4524 delta = rb_event_time_stamp(event); 4525 delta = rb_fix_abs_ts(delta, iter->read_stamp); 4526 iter->read_stamp = delta; 4527 return; 4528 4529 case RINGBUF_TYPE_DATA: 4530 iter->read_stamp += event->time_delta; 4531 return; 4532 4533 default: 4534 RB_WARN_ON(iter->cpu_buffer, 1); 4535 } 4536 return; 4537 } 4538 4539 static struct buffer_page * 4540 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 4541 { 4542 struct buffer_page *reader = NULL; 4543 unsigned long overwrite; 4544 unsigned long flags; 4545 int nr_loops = 0; 4546 int ret; 4547 4548 local_irq_save(flags); 4549 arch_spin_lock(&cpu_buffer->lock); 4550 4551 again: 4552 /* 4553 * This should normally only loop twice. But because the 4554 * start of the reader inserts an empty page, it causes 4555 * a case where we will loop three times. There should be no 4556 * reason to loop four times (that I know of). 
4557 	 */
4558 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4559 		reader = NULL;
4560 		goto out;
4561 	}
4562 
4563 	reader = cpu_buffer->reader_page;
4564 
4565 	/* If there's more to read, return this page */
4566 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
4567 		goto out;
4568 
4569 	/* Never should we have an index greater than the size */
4570 	if (RB_WARN_ON(cpu_buffer,
4571 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
4572 		goto out;
4573 
4574 	/* check if we caught up to the tail */
4575 	reader = NULL;
4576 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4577 		goto out;
4578 
4579 	/* Don't bother swapping if the ring buffer is empty */
4580 	if (rb_num_of_entries(cpu_buffer) == 0)
4581 		goto out;
4582 
4583 	/*
4584 	 * Reset the reader page to size zero.
4585 	 */
4586 	local_set(&cpu_buffer->reader_page->write, 0);
4587 	local_set(&cpu_buffer->reader_page->entries, 0);
4588 	local_set(&cpu_buffer->reader_page->page->commit, 0);
4589 	cpu_buffer->reader_page->real_end = 0;
4590 
4591  spin:
4592 	/*
4593 	 * Splice the empty reader page into the list around the head.
4594 	 */
4595 	reader = rb_set_head_page(cpu_buffer);
4596 	if (!reader)
4597 		goto out;
4598 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4599 	cpu_buffer->reader_page->list.prev = reader->list.prev;
4600 
4601 	/*
4602 	 * cpu_buffer->pages just needs to point to the buffer, it
4603 	 * has no specific buffer page to point to. Let's move it out
4604 	 * of our way so we don't accidentally swap it.
4605 	 */
4606 	cpu_buffer->pages = reader->list.prev;
4607 
4608 	/* The reader page will be pointing to the new head */
4609 	rb_set_list_to_head(&cpu_buffer->reader_page->list);
4610 
4611 	/*
4612 	 * We want to make sure we read the overruns after we set up our
4613 	 * pointers to the next object. The writer side does a
4614 	 * cmpxchg to cross pages which acts as the mb on the writer
4615 	 * side. Note, the reader will constantly fail the swap
4616 	 * while the writer is updating the pointers, so this
4617 	 * guarantees that the overwrite recorded here is the one we
4618 	 * want to compare with the last_overrun.
4619 	 */
4620 	smp_mb();
4621 	overwrite = local_read(&(cpu_buffer->overrun));
4622 
4623 	/*
4624 	 * Here's the tricky part.
4625 	 *
4626 	 * We need to move the pointer past the header page.
4627 	 * But we can only do that if a writer is not currently
4628 	 * moving it. The page before the header page has the
4629 	 * flag bit '1' set if it is pointing to the page we want,
4630 	 * but if the writer is in the process of moving it
4631 	 * then it will be '2', or '0' if it has already moved on.
4632 	 */
4633 
4634 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4635 
4636 	/*
4637 	 * If we did not convert it, then we must try again.
4638 	 */
4639 	if (!ret)
4640 		goto spin;
4641 
4642 	/*
4643 	 * Yay! We succeeded in replacing the page.
4644 	 *
4645 	 * Now make the new head point back to the reader page.
4646 	 */
4647 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4648 	rb_inc_page(&cpu_buffer->head_page);
4649 
4650 	local_inc(&cpu_buffer->pages_read);
4651 
4652 	/* Finally update the reader page to the new head */
4653 	cpu_buffer->reader_page = reader;
4654 	cpu_buffer->reader_page->read = 0;
4655 
4656 	if (overwrite != cpu_buffer->last_overrun) {
4657 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4658 		cpu_buffer->last_overrun = overwrite;
4659 	}
4660 
4661 	goto again;
4662 
4663  out:
4664 	/* Update the read_stamp on the first event */
4665 	if (reader && reader->read == 0)
4666 		cpu_buffer->read_stamp = reader->page->time_stamp;
4667 
4668 	arch_spin_unlock(&cpu_buffer->lock);
4669 	local_irq_restore(flags);
4670 
4671 	/*
4672 	 * The writer has preemption disabled; wait for it, but not forever.
4673 	 * Although, 1 second is pretty much "forever".
4674 	 */
4675 #define USECS_WAIT	1000000
4676 	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4677 		/* If the write is past the end of page, a writer is still updating it */
4678 		if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
4679 			break;
4680 
4681 		udelay(1);
4682 
4683 		/* Get the latest version of the reader write value */
4684 		smp_rmb();
4685 	}
4686 
4687 	/* The writer is not moving forward? Something is wrong */
4688 	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4689 		reader = NULL;
4690 
4691 	/*
4692 	 * Make sure we see any padding after the write update
4693 	 * (see rb_reset_tail()).
4694 	 *
4695 	 * In addition, a writer may be writing on the reader page
4696 	 * if the page has not been fully filled, so the read barrier
4697 	 * is also needed to make sure we see the content of what is
4698 	 * committed by the writer (see rb_set_commit_to_write()).
4699 	 */
4700 	smp_rmb();
4701 
4702 
4703 	return reader;
4704 }
4705 
4706 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4707 {
4708 	struct ring_buffer_event *event;
4709 	struct buffer_page *reader;
4710 	unsigned length;
4711 
4712 	reader = rb_get_reader_page(cpu_buffer);
4713 
4714 	/* This function should not be called when buffer is empty */
4715 	if (RB_WARN_ON(cpu_buffer, !reader))
4716 		return;
4717 
4718 	event = rb_reader_event(cpu_buffer);
4719 
4720 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4721 		cpu_buffer->read++;
4722 
4723 	rb_update_read_stamp(cpu_buffer, event);
4724 
4725 	length = rb_event_length(event);
4726 	cpu_buffer->reader_page->read += length;
4727 }
4728 
4729 static void rb_advance_iter(struct ring_buffer_iter *iter)
4730 {
4731 	struct ring_buffer_per_cpu *cpu_buffer;
4732 
4733 	cpu_buffer = iter->cpu_buffer;
4734 
4735 	/* If head == next_event then we need to jump to the next event */
4736 	if (iter->head == iter->next_event) {
4737 		/* If the event gets overwritten again, there's nothing to do */
4738 		if (rb_iter_head_event(iter) == NULL)
4739 			return;
4740 	}
4741 
4742 	iter->head = iter->next_event;
4743 
4744 	/*
4745 	 * Check if we are at the end of the buffer.
4746 */ 4747 if (iter->next_event >= rb_page_size(iter->head_page)) { 4748 /* discarded commits can make the page empty */ 4749 if (iter->head_page == cpu_buffer->commit_page) 4750 return; 4751 rb_inc_iter(iter); 4752 return; 4753 } 4754 4755 rb_update_iter_read_stamp(iter, iter->event); 4756 } 4757 4758 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 4759 { 4760 return cpu_buffer->lost_events; 4761 } 4762 4763 static struct ring_buffer_event * 4764 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 4765 unsigned long *lost_events) 4766 { 4767 struct ring_buffer_event *event; 4768 struct buffer_page *reader; 4769 int nr_loops = 0; 4770 4771 if (ts) 4772 *ts = 0; 4773 again: 4774 /* 4775 * We repeat when a time extend is encountered. 4776 * Since the time extend is always attached to a data event, 4777 * we should never loop more than once. 4778 * (We never hit the following condition more than twice). 4779 */ 4780 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4781 return NULL; 4782 4783 reader = rb_get_reader_page(cpu_buffer); 4784 if (!reader) 4785 return NULL; 4786 4787 event = rb_reader_event(cpu_buffer); 4788 4789 switch (event->type_len) { 4790 case RINGBUF_TYPE_PADDING: 4791 if (rb_null_event(event)) 4792 RB_WARN_ON(cpu_buffer, 1); 4793 /* 4794 * Because the writer could be discarding every 4795 * event it creates (which would probably be bad) 4796 * if we were to go back to "again" then we may never 4797 * catch up, and will trigger the warn on, or lock 4798 * the box. Return the padding, and we will release 4799 * the current locks, and try again. 4800 */ 4801 return event; 4802 4803 case RINGBUF_TYPE_TIME_EXTEND: 4804 /* Internal data, OK to advance */ 4805 rb_advance_reader(cpu_buffer); 4806 goto again; 4807 4808 case RINGBUF_TYPE_TIME_STAMP: 4809 if (ts) { 4810 *ts = rb_event_time_stamp(event); 4811 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4812 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4813 cpu_buffer->cpu, ts); 4814 } 4815 /* Internal data, OK to advance */ 4816 rb_advance_reader(cpu_buffer); 4817 goto again; 4818 4819 case RINGBUF_TYPE_DATA: 4820 if (ts && !(*ts)) { 4821 *ts = cpu_buffer->read_stamp + event->time_delta; 4822 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4823 cpu_buffer->cpu, ts); 4824 } 4825 if (lost_events) 4826 *lost_events = rb_lost_events(cpu_buffer); 4827 return event; 4828 4829 default: 4830 RB_WARN_ON(cpu_buffer, 1); 4831 } 4832 4833 return NULL; 4834 } 4835 EXPORT_SYMBOL_GPL(ring_buffer_peek); 4836 4837 static struct ring_buffer_event * 4838 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 4839 { 4840 struct trace_buffer *buffer; 4841 struct ring_buffer_per_cpu *cpu_buffer; 4842 struct ring_buffer_event *event; 4843 int nr_loops = 0; 4844 4845 if (ts) 4846 *ts = 0; 4847 4848 cpu_buffer = iter->cpu_buffer; 4849 buffer = cpu_buffer->buffer; 4850 4851 /* 4852 * Check if someone performed a consuming read to 4853 * the buffer. A consuming read invalidates the iterator 4854 * and we need to reset the iterator in this case. 4855 */ 4856 if (unlikely(iter->cache_read != cpu_buffer->read || 4857 iter->cache_reader_page != cpu_buffer->reader_page)) 4858 rb_iter_reset(iter); 4859 4860 again: 4861 if (ring_buffer_iter_empty(iter)) 4862 return NULL; 4863 4864 /* 4865 * As the writer can mess with what the iterator is trying 4866 * to read, just give up if we fail to get an event after 4867 * three tries. 
The iterator is not as reliable when reading
4868 	 * the ring buffer with an active write as the consumer is.
4869 	 * Do not warn if three failures are reached.
4870 	 */
4871 	if (++nr_loops > 3)
4872 		return NULL;
4873 
4874 	if (rb_per_cpu_empty(cpu_buffer))
4875 		return NULL;
4876 
4877 	if (iter->head >= rb_page_size(iter->head_page)) {
4878 		rb_inc_iter(iter);
4879 		goto again;
4880 	}
4881 
4882 	event = rb_iter_head_event(iter);
4883 	if (!event)
4884 		goto again;
4885 
4886 	switch (event->type_len) {
4887 	case RINGBUF_TYPE_PADDING:
4888 		if (rb_null_event(event)) {
4889 			rb_inc_iter(iter);
4890 			goto again;
4891 		}
4892 		rb_advance_iter(iter);
4893 		return event;
4894 
4895 	case RINGBUF_TYPE_TIME_EXTEND:
4896 		/* Internal data, OK to advance */
4897 		rb_advance_iter(iter);
4898 		goto again;
4899 
4900 	case RINGBUF_TYPE_TIME_STAMP:
4901 		if (ts) {
4902 			*ts = rb_event_time_stamp(event);
4903 			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4904 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4905 							 cpu_buffer->cpu, ts);
4906 		}
4907 		/* Internal data, OK to advance */
4908 		rb_advance_iter(iter);
4909 		goto again;
4910 
4911 	case RINGBUF_TYPE_DATA:
4912 		if (ts && !(*ts)) {
4913 			*ts = iter->read_stamp + event->time_delta;
4914 			ring_buffer_normalize_time_stamp(buffer,
4915 							 cpu_buffer->cpu, ts);
4916 		}
4917 		return event;
4918 
4919 	default:
4920 		RB_WARN_ON(cpu_buffer, 1);
4921 	}
4922 
4923 	return NULL;
4924 }
4925 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4926 
4927 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4928 {
4929 	if (likely(!in_nmi())) {
4930 		raw_spin_lock(&cpu_buffer->reader_lock);
4931 		return true;
4932 	}
4933 
4934 	/*
4935 	 * If an NMI die dumps out the content of the ring buffer,
4936 	 * trylock must be used to prevent a deadlock if the NMI
4937 	 * preempted a task that holds the ring buffer locks. If
4938 	 * we get the lock then all is fine, if not, then continue
4939 	 * to do the read, but this can corrupt the ring buffer,
4940 	 * so it must be permanently disabled from future writes.
4941 	 * Reading from NMI is a one-shot deal.
4942 	 */
4943 	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4944 		return true;
4945 
4946 	/* Continue without locking, but disable the ring buffer */
4947 	atomic_inc(&cpu_buffer->record_disabled);
4948 	return false;
4949 }
4950 
4951 static inline void
4952 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4953 {
4954 	if (likely(locked))
4955 		raw_spin_unlock(&cpu_buffer->reader_lock);
4956 	return;
4957 }
4958 
4959 /**
4960  * ring_buffer_peek - peek at the next event to be read
4961  * @buffer: The ring buffer to read
4962  * @cpu: The cpu to peek at
4963  * @ts: The timestamp counter of this event.
4964  * @lost_events: a variable to store if events were lost (may be NULL)
4965  *
4966  * This will return the event that will be read next, but does
4967  * not consume the data.
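 *
 * A sketch of typical use (illustrative; the message printed is an
 * assumption, not part of this API):
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		pr_info("next event time stamp: %llu\n", ts);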
4968  */
4969 struct ring_buffer_event *
4970 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4971 		 unsigned long *lost_events)
4972 {
4973 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4974 	struct ring_buffer_event *event;
4975 	unsigned long flags;
4976 	bool dolock;
4977 
4978 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4979 		return NULL;
4980 
4981  again:
4982 	local_irq_save(flags);
4983 	dolock = rb_reader_lock(cpu_buffer);
4984 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4985 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4986 		rb_advance_reader(cpu_buffer);
4987 	rb_reader_unlock(cpu_buffer, dolock);
4988 	local_irq_restore(flags);
4989 
4990 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4991 		goto again;
4992 
4993 	return event;
4994 }
4995 
4996 /**
 * ring_buffer_iter_dropped - report if there are dropped events
4997  * @iter: The ring buffer iterator
4998  *
4999  * Returns true if there were dropped events since the last peek.
5000  */
5001 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5002 {
5003 	bool ret = iter->missed_events != 0;
5004 
5005 	iter->missed_events = 0;
5006 	return ret;
5007 }
5008 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5009 
5010 /**
5011  * ring_buffer_iter_peek - peek at the next event to be read
5012  * @iter: The ring buffer iterator
5013  * @ts: The timestamp counter of this event.
5014  *
5015  * This will return the event that will be read next, but does
5016  * not increment the iterator.
5017  */
5018 struct ring_buffer_event *
5019 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5020 {
5021 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5022 	struct ring_buffer_event *event;
5023 	unsigned long flags;
5024 
5025  again:
5026 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5027 	event = rb_iter_peek(iter, ts);
5028 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5029 
5030 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5031 		goto again;
5032 
5033 	return event;
5034 }
5035 
5036 /**
5037  * ring_buffer_consume - return an event and consume it
5038  * @buffer: The ring buffer to get the next event from
5039  * @cpu: the cpu to read the buffer from
5040  * @ts: a variable to store the timestamp (may be NULL)
5041  * @lost_events: a variable to store if events were lost (may be NULL)
5042  *
5043  * Returns the next event in the ring buffer, and that event is consumed.
5044  * Meaning that sequential reads will keep returning a different event,
5045  * and eventually empty the ring buffer if the producer is slower.
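 *
 * A drain loop might look like this (illustrative sketch; the
 * process_event() helper is hypothetical):
 *
 *	u64 ts;
 *	unsigned long lost = 0;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event), ts, lost);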
5046  */
5047 struct ring_buffer_event *
5048 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5049 		    unsigned long *lost_events)
5050 {
5051 	struct ring_buffer_per_cpu *cpu_buffer;
5052 	struct ring_buffer_event *event = NULL;
5053 	unsigned long flags;
5054 	bool dolock;
5055 
5056  again:
5057 	/* might be called in atomic */
5058 	preempt_disable();
5059 
5060 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5061 		goto out;
5062 
5063 	cpu_buffer = buffer->buffers[cpu];
5064 	local_irq_save(flags);
5065 	dolock = rb_reader_lock(cpu_buffer);
5066 
5067 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5068 	if (event) {
5069 		cpu_buffer->lost_events = 0;
5070 		rb_advance_reader(cpu_buffer);
5071 	}
5072 
5073 	rb_reader_unlock(cpu_buffer, dolock);
5074 	local_irq_restore(flags);
5075 
5076  out:
5077 	preempt_enable();
5078 
5079 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5080 		goto again;
5081 
5082 	return event;
5083 }
5084 EXPORT_SYMBOL_GPL(ring_buffer_consume);
5085 
5086 /**
5087  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5088  * @buffer: The ring buffer to read from
5089  * @cpu: The cpu buffer to iterate over
5090  * @flags: gfp flags to use for memory allocation
5091  *
5092  * This performs the initial preparations necessary to iterate
5093  * through the buffer. Memory is allocated, buffer recording
5094  * is disabled, and the iterator pointer is returned to the caller.
5095  *
5096  * Disabling buffer recording prevents the reading from being
5097  * corrupted. This is not a consuming read, so a producer is not
5098  * expected.
5099  *
5100  * After a sequence of ring_buffer_read_prepare calls, the user is
5101  * expected to make at least one call to ring_buffer_read_prepare_sync.
5102  * Afterwards, ring_buffer_read_start is invoked to get things going
5103  * for real.
5104  *
5105  * This overall must be paired with ring_buffer_read_finish.
5106  */
5107 struct ring_buffer_iter *
5108 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5109 {
5110 	struct ring_buffer_per_cpu *cpu_buffer;
5111 	struct ring_buffer_iter *iter;
5112 
5113 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5114 		return NULL;
5115 
5116 	iter = kzalloc(sizeof(*iter), flags);
5117 	if (!iter)
5118 		return NULL;
5119 
5120 	iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
5121 	if (!iter->event) {
5122 		kfree(iter);
5123 		return NULL;
5124 	}
5125 
5126 	cpu_buffer = buffer->buffers[cpu];
5127 
5128 	iter->cpu_buffer = cpu_buffer;
5129 
5130 	atomic_inc(&cpu_buffer->resize_disabled);
5131 
5132 	return iter;
5133 }
5134 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5135 
5136 /**
5137  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5138  *
5139  * All previously invoked ring_buffer_read_prepare calls to prepare
5140  * iterators will be synchronized. Afterwards, ring_buffer_read_start
5141  * calls on those iterators are allowed.
5142  */
5143 void
5144 ring_buffer_read_prepare_sync(void)
5145 {
5146 	synchronize_rcu();
5147 }
5148 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5149 
5150 /**
5151  * ring_buffer_read_start - start a non consuming read of the buffer
5152  * @iter: The iterator returned by ring_buffer_read_prepare
5153  *
5154  * This finalizes the startup of an iteration through the buffer.
5155  * The iterator comes from a call to ring_buffer_read_prepare and
5156  * an intervening ring_buffer_read_prepare_sync must have been
5157  * performed.
5158  *
5159  * Must be paired with ring_buffer_read_finish.
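 *
 * The whole non-consuming sequence looks roughly like this
 * (an illustrative sketch):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *
 *	ring_buffer_read_finish(iter);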
5160  */
5161 void
5162 ring_buffer_read_start(struct ring_buffer_iter *iter)
5163 {
5164 	struct ring_buffer_per_cpu *cpu_buffer;
5165 	unsigned long flags;
5166 
5167 	if (!iter)
5168 		return;
5169 
5170 	cpu_buffer = iter->cpu_buffer;
5171 
5172 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5173 	arch_spin_lock(&cpu_buffer->lock);
5174 	rb_iter_reset(iter);
5175 	arch_spin_unlock(&cpu_buffer->lock);
5176 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5177 }
5178 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5179 
5180 /**
5181  * ring_buffer_read_finish - finish reading the iterator of the buffer
5182  * @iter: The iterator retrieved by ring_buffer_read_prepare
5183  *
5184  * This re-enables the recording to the buffer, and frees the
5185  * iterator.
5186  */
5187 void
5188 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5189 {
5190 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5191 	unsigned long flags;
5192 
5193 	/*
5194 	 * Ring buffer is disabled from recording, here's a good place
5195 	 * to check the integrity of the ring buffer.
5196 	 * Must prevent readers from trying to read, as the check
5197 	 * clears the HEAD page and readers require it.
5198 	 */
5199 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5200 	rb_check_pages(cpu_buffer);
5201 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5202 
5203 	atomic_dec(&cpu_buffer->resize_disabled);
5204 	kfree(iter->event);
5205 	kfree(iter);
5206 }
5207 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5208 
5209 /**
5210  * ring_buffer_iter_advance - advance the iterator to the next location
5211  * @iter: The ring buffer iterator
5212  *
5213  * Move the location of the iterator such that the next read will
5214  * be the next location of the iterator.
5215  */
5216 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5217 {
5218 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5219 	unsigned long flags;
5220 
5221 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5222 
5223 	rb_advance_iter(iter);
5224 
5225 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5226 }
5227 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5228 
5229 /**
5230  * ring_buffer_size - return the size of the ring buffer (in bytes)
5231  * @buffer: The ring buffer.
5232  * @cpu: The CPU to get ring buffer size from.
5233  */
5234 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5235 {
5236 	/*
5237 	 * Earlier, this method returned
5238 	 *	BUF_PAGE_SIZE * buffer->nr_pages
5239 	 * Since the nr_pages field is now removed, we have converted this to
5240 	 * return the per cpu buffer value.
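	 * A caller that wants a buffer-wide total can sum this value over
	 * each CPU set in buffer->cpumask.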
5241 */ 5242 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5243 return 0; 5244 5245 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 5246 } 5247 EXPORT_SYMBOL_GPL(ring_buffer_size); 5248 5249 static void 5250 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5251 { 5252 rb_head_page_deactivate(cpu_buffer); 5253 5254 cpu_buffer->head_page 5255 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5256 local_set(&cpu_buffer->head_page->write, 0); 5257 local_set(&cpu_buffer->head_page->entries, 0); 5258 local_set(&cpu_buffer->head_page->page->commit, 0); 5259 5260 cpu_buffer->head_page->read = 0; 5261 5262 cpu_buffer->tail_page = cpu_buffer->head_page; 5263 cpu_buffer->commit_page = cpu_buffer->head_page; 5264 5265 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5266 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5267 local_set(&cpu_buffer->reader_page->write, 0); 5268 local_set(&cpu_buffer->reader_page->entries, 0); 5269 local_set(&cpu_buffer->reader_page->page->commit, 0); 5270 cpu_buffer->reader_page->read = 0; 5271 5272 local_set(&cpu_buffer->entries_bytes, 0); 5273 local_set(&cpu_buffer->overrun, 0); 5274 local_set(&cpu_buffer->commit_overrun, 0); 5275 local_set(&cpu_buffer->dropped_events, 0); 5276 local_set(&cpu_buffer->entries, 0); 5277 local_set(&cpu_buffer->committing, 0); 5278 local_set(&cpu_buffer->commits, 0); 5279 local_set(&cpu_buffer->pages_touched, 0); 5280 local_set(&cpu_buffer->pages_lost, 0); 5281 local_set(&cpu_buffer->pages_read, 0); 5282 cpu_buffer->last_pages_touch = 0; 5283 cpu_buffer->shortest_full = 0; 5284 cpu_buffer->read = 0; 5285 cpu_buffer->read_bytes = 0; 5286 5287 rb_time_set(&cpu_buffer->write_stamp, 0); 5288 rb_time_set(&cpu_buffer->before_stamp, 0); 5289 5290 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 5291 5292 cpu_buffer->lost_events = 0; 5293 cpu_buffer->last_overrun = 0; 5294 5295 rb_head_page_activate(cpu_buffer); 5296 } 5297 5298 /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5299 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 5300 { 5301 unsigned long flags; 5302 5303 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5304 5305 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 5306 goto out; 5307 5308 arch_spin_lock(&cpu_buffer->lock); 5309 5310 rb_reset_cpu(cpu_buffer); 5311 5312 arch_spin_unlock(&cpu_buffer->lock); 5313 5314 out: 5315 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5316 } 5317 5318 /** 5319 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5320 * @buffer: The ring buffer to reset a per cpu buffer of 5321 * @cpu: The CPU buffer to be reset 5322 */ 5323 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5324 { 5325 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5326 5327 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5328 return; 5329 5330 /* prevent another thread from changing buffer sizes */ 5331 mutex_lock(&buffer->mutex); 5332 5333 atomic_inc(&cpu_buffer->resize_disabled); 5334 atomic_inc(&cpu_buffer->record_disabled); 5335 5336 /* Make sure all commits have finished */ 5337 synchronize_rcu(); 5338 5339 reset_disabled_cpu_buffer(cpu_buffer); 5340 5341 atomic_dec(&cpu_buffer->record_disabled); 5342 atomic_dec(&cpu_buffer->resize_disabled); 5343 5344 mutex_unlock(&buffer->mutex); 5345 } 5346 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 5347 5348 /** 5349 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5350 * @buffer: The ring buffer to reset a per cpu buffer 
of
5352  */
5353 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5354 {
5355 	struct ring_buffer_per_cpu *cpu_buffer;
5356 	int cpu;
5357 
5358 	/* prevent another thread from changing buffer sizes */
5359 	mutex_lock(&buffer->mutex);
5360 
5361 	for_each_online_buffer_cpu(buffer, cpu) {
5362 		cpu_buffer = buffer->buffers[cpu];
5363 
5364 		atomic_inc(&cpu_buffer->resize_disabled);
5365 		atomic_inc(&cpu_buffer->record_disabled);
5366 	}
5367 
5368 	/* Make sure all commits have finished */
5369 	synchronize_rcu();
5370 
5371 	for_each_online_buffer_cpu(buffer, cpu) {
5372 		cpu_buffer = buffer->buffers[cpu];
5373 
5374 		reset_disabled_cpu_buffer(cpu_buffer);
5375 
5376 		atomic_dec(&cpu_buffer->record_disabled);
5377 		atomic_dec(&cpu_buffer->resize_disabled);
5378 	}
5379 
5380 	mutex_unlock(&buffer->mutex);
5381 }
5382 
5383 /**
5384  * ring_buffer_reset - reset a ring buffer
5385  * @buffer: The ring buffer to reset all cpu buffers
5386  */
5387 void ring_buffer_reset(struct trace_buffer *buffer)
5388 {
5389 	struct ring_buffer_per_cpu *cpu_buffer;
5390 	int cpu;
5391 
5392 	/* prevent another thread from changing buffer sizes */
5393 	mutex_lock(&buffer->mutex);
5394 
5395 	for_each_buffer_cpu(buffer, cpu) {
5396 		cpu_buffer = buffer->buffers[cpu];
5397 
5398 		atomic_inc(&cpu_buffer->resize_disabled);
5399 		atomic_inc(&cpu_buffer->record_disabled);
5400 	}
5401 
5402 	/* Make sure all commits have finished */
5403 	synchronize_rcu();
5404 
5405 	for_each_buffer_cpu(buffer, cpu) {
5406 		cpu_buffer = buffer->buffers[cpu];
5407 
5408 		reset_disabled_cpu_buffer(cpu_buffer);
5409 
5410 		atomic_dec(&cpu_buffer->record_disabled);
5411 		atomic_dec(&cpu_buffer->resize_disabled);
5412 	}
5413 
5414 	mutex_unlock(&buffer->mutex);
5415 }
5416 EXPORT_SYMBOL_GPL(ring_buffer_reset);
5417 
5418 /**
5419  * ring_buffer_empty - is the ring buffer empty?
5420  * @buffer: The ring buffer to test
5421  */
5422 bool ring_buffer_empty(struct trace_buffer *buffer)
5423 {
5424 	struct ring_buffer_per_cpu *cpu_buffer;
5425 	unsigned long flags;
5426 	bool dolock;
5427 	int cpu;
5428 	int ret;
5429 
5430 	/* yes this is racy, but if you don't like the race, lock the buffer */
5431 	for_each_buffer_cpu(buffer, cpu) {
5432 		cpu_buffer = buffer->buffers[cpu];
5433 		local_irq_save(flags);
5434 		dolock = rb_reader_lock(cpu_buffer);
5435 		ret = rb_per_cpu_empty(cpu_buffer);
5436 		rb_reader_unlock(cpu_buffer, dolock);
5437 		local_irq_restore(flags);
5438 
5439 		if (!ret)
5440 			return false;
5441 	}
5442 
5443 	return true;
5444 }
5445 EXPORT_SYMBOL_GPL(ring_buffer_empty);
5446 
5447 /**
5448  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5449 * @buffer: The ring buffer 5450 * @cpu: The CPU buffer to test 5451 */ 5452 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) 5453 { 5454 struct ring_buffer_per_cpu *cpu_buffer; 5455 unsigned long flags; 5456 bool dolock; 5457 int ret; 5458 5459 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5460 return true; 5461 5462 cpu_buffer = buffer->buffers[cpu]; 5463 local_irq_save(flags); 5464 dolock = rb_reader_lock(cpu_buffer); 5465 ret = rb_per_cpu_empty(cpu_buffer); 5466 rb_reader_unlock(cpu_buffer, dolock); 5467 local_irq_restore(flags); 5468 5469 return ret; 5470 } 5471 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 5472 5473 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 5474 /** 5475 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 5476 * @buffer_a: One buffer to swap with 5477 * @buffer_b: The other buffer to swap with 5478 * @cpu: the CPU of the buffers to swap 5479 * 5480 * This function is useful for tracers that want to take a "snapshot" 5481 * of a CPU buffer and has another back up buffer lying around. 5482 * it is expected that the tracer handles the cpu buffer not being 5483 * used at the moment. 5484 */ 5485 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 5486 struct trace_buffer *buffer_b, int cpu) 5487 { 5488 struct ring_buffer_per_cpu *cpu_buffer_a; 5489 struct ring_buffer_per_cpu *cpu_buffer_b; 5490 int ret = -EINVAL; 5491 5492 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 5493 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5494 goto out; 5495 5496 cpu_buffer_a = buffer_a->buffers[cpu]; 5497 cpu_buffer_b = buffer_b->buffers[cpu]; 5498 5499 /* At least make sure the two buffers are somewhat the same */ 5500 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5501 goto out; 5502 5503 ret = -EAGAIN; 5504 5505 if (atomic_read(&buffer_a->record_disabled)) 5506 goto out; 5507 5508 if (atomic_read(&buffer_b->record_disabled)) 5509 goto out; 5510 5511 if (atomic_read(&cpu_buffer_a->record_disabled)) 5512 goto out; 5513 5514 if (atomic_read(&cpu_buffer_b->record_disabled)) 5515 goto out; 5516 5517 /* 5518 * We can't do a synchronize_rcu here because this 5519 * function can be called in atomic context. 5520 * Normally this will be called from the same CPU as cpu. 5521 * If not it's up to the caller to protect this. 5522 */ 5523 atomic_inc(&cpu_buffer_a->record_disabled); 5524 atomic_inc(&cpu_buffer_b->record_disabled); 5525 5526 ret = -EBUSY; 5527 if (local_read(&cpu_buffer_a->committing)) 5528 goto out_dec; 5529 if (local_read(&cpu_buffer_b->committing)) 5530 goto out_dec; 5531 5532 buffer_a->buffers[cpu] = cpu_buffer_b; 5533 buffer_b->buffers[cpu] = cpu_buffer_a; 5534 5535 cpu_buffer_b->buffer = buffer_a; 5536 cpu_buffer_a->buffer = buffer_b; 5537 5538 ret = 0; 5539 5540 out_dec: 5541 atomic_dec(&cpu_buffer_a->record_disabled); 5542 atomic_dec(&cpu_buffer_b->record_disabled); 5543 out: 5544 return ret; 5545 } 5546 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 5547 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 5548 5549 /** 5550 * ring_buffer_alloc_read_page - allocate a page to read from buffer 5551 * @buffer: the buffer to allocate for. 5552 * @cpu: the cpu buffer to allocate. 5553 * 5554 * This function is used in conjunction with ring_buffer_read_page. 5555 * When reading a full page from the ring buffer, these functions 5556 * can be used to speed up the process. The calling function should 5557 * allocate a few pages first with this function. 
Then when it 5558 * needs to get pages from the ring buffer, it passes the result 5559 * of this function into ring_buffer_read_page, which will swap 5560 * the page that was allocated, with the read page of the buffer. 5561 * 5562 * Returns: 5563 * The page allocated, or ERR_PTR 5564 */ 5565 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) 5566 { 5567 struct ring_buffer_per_cpu *cpu_buffer; 5568 struct buffer_data_page *bpage = NULL; 5569 unsigned long flags; 5570 struct page *page; 5571 5572 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5573 return ERR_PTR(-ENODEV); 5574 5575 cpu_buffer = buffer->buffers[cpu]; 5576 local_irq_save(flags); 5577 arch_spin_lock(&cpu_buffer->lock); 5578 5579 if (cpu_buffer->free_page) { 5580 bpage = cpu_buffer->free_page; 5581 cpu_buffer->free_page = NULL; 5582 } 5583 5584 arch_spin_unlock(&cpu_buffer->lock); 5585 local_irq_restore(flags); 5586 5587 if (bpage) 5588 goto out; 5589 5590 page = alloc_pages_node(cpu_to_node(cpu), 5591 GFP_KERNEL | __GFP_NORETRY, 0); 5592 if (!page) 5593 return ERR_PTR(-ENOMEM); 5594 5595 bpage = page_address(page); 5596 5597 out: 5598 rb_init_page(bpage); 5599 5600 return bpage; 5601 } 5602 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 5603 5604 /** 5605 * ring_buffer_free_read_page - free an allocated read page 5606 * @buffer: the buffer the page was allocate for 5607 * @cpu: the cpu buffer the page came from 5608 * @data: the page to free 5609 * 5610 * Free a page allocated from ring_buffer_alloc_read_page. 5611 */ 5612 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) 5613 { 5614 struct ring_buffer_per_cpu *cpu_buffer; 5615 struct buffer_data_page *bpage = data; 5616 struct page *page = virt_to_page(bpage); 5617 unsigned long flags; 5618 5619 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) 5620 return; 5621 5622 cpu_buffer = buffer->buffers[cpu]; 5623 5624 /* If the page is still in use someplace else, we can't reuse it */ 5625 if (page_ref_count(page) > 1) 5626 goto out; 5627 5628 local_irq_save(flags); 5629 arch_spin_lock(&cpu_buffer->lock); 5630 5631 if (!cpu_buffer->free_page) { 5632 cpu_buffer->free_page = bpage; 5633 bpage = NULL; 5634 } 5635 5636 arch_spin_unlock(&cpu_buffer->lock); 5637 local_irq_restore(flags); 5638 5639 out: 5640 free_page((unsigned long)bpage); 5641 } 5642 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 5643 5644 /** 5645 * ring_buffer_read_page - extract a page from the ring buffer 5646 * @buffer: buffer to extract from 5647 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 5648 * @len: amount to extract 5649 * @cpu: the cpu of the buffer to extract 5650 * @full: should the extraction only happen when the page is full. 5651 * 5652 * This function will pull out a page from the ring buffer and consume it. 5653 * @data_page must be the address of the variable that was returned 5654 * from ring_buffer_alloc_read_page. This is because the page might be used 5655 * to swap with a page in the ring buffer. 5656 * 5657 * for example: 5658 * rpage = ring_buffer_alloc_read_page(buffer, cpu); 5659 * if (IS_ERR(rpage)) 5660 * return PTR_ERR(rpage); 5661 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 5662 * if (ret >= 0) 5663 * process_page(rpage, ret); 5664 * 5665 * When @full is set, the function will not return true unless 5666 * the writer is off the reader page. 5667 * 5668 * Note: it is up to the calling functions to handle sleeps and wakeups. 
5669 * The ring buffer can be used anywhere in the kernel and can not 5670 * blindly call wake_up. The layer that uses the ring buffer must be 5671 * responsible for that. 5672 * 5673 * Returns: 5674 * >=0 if data has been transferred, returns the offset of consumed data. 5675 * <0 if no data has been transferred. 5676 */ 5677 int ring_buffer_read_page(struct trace_buffer *buffer, 5678 void **data_page, size_t len, int cpu, int full) 5679 { 5680 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5681 struct ring_buffer_event *event; 5682 struct buffer_data_page *bpage; 5683 struct buffer_page *reader; 5684 unsigned long missed_events; 5685 unsigned long flags; 5686 unsigned int commit; 5687 unsigned int read; 5688 u64 save_timestamp; 5689 int ret = -1; 5690 5691 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5692 goto out; 5693 5694 /* 5695 * If len is not big enough to hold the page header, then 5696 * we can not copy anything. 5697 */ 5698 if (len <= BUF_PAGE_HDR_SIZE) 5699 goto out; 5700 5701 len -= BUF_PAGE_HDR_SIZE; 5702 5703 if (!data_page) 5704 goto out; 5705 5706 bpage = *data_page; 5707 if (!bpage) 5708 goto out; 5709 5710 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5711 5712 reader = rb_get_reader_page(cpu_buffer); 5713 if (!reader) 5714 goto out_unlock; 5715 5716 event = rb_reader_event(cpu_buffer); 5717 5718 read = reader->read; 5719 commit = rb_page_commit(reader); 5720 5721 /* Check if any events were dropped */ 5722 missed_events = cpu_buffer->lost_events; 5723 5724 /* 5725 * If this page has been partially read or 5726 * if len is not big enough to read the rest of the page or 5727 * a writer is still on the page, then 5728 * we must copy the data from the page to the buffer. 5729 * Otherwise, we can simply swap the page with the one passed in. 5730 */ 5731 if (read || (len < (commit - read)) || 5732 cpu_buffer->reader_page == cpu_buffer->commit_page) { 5733 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5734 unsigned int rpos = read; 5735 unsigned int pos = 0; 5736 unsigned int size; 5737 5738 /* 5739 * If a full page is expected, this can still be returned 5740 * if there's been a previous partial read and the 5741 * rest of the page can be read and the commit page is off 5742 * the reader page. 5743 */ 5744 if (full && 5745 (!read || (len < (commit - read)) || 5746 cpu_buffer->reader_page == cpu_buffer->commit_page)) 5747 goto out_unlock; 5748 5749 if (len > (commit - read)) 5750 len = (commit - read); 5751 5752 /* Always keep the time extend and data together */ 5753 size = rb_event_ts_length(event); 5754 5755 if (len < size) 5756 goto out_unlock; 5757 5758 /* save the current timestamp, since the user will need it */ 5759 save_timestamp = cpu_buffer->read_stamp; 5760 5761 /* Need to copy one event at a time */ 5762 do { 5763 /* We need the size of one event, because 5764 * rb_advance_reader only advances by one event, 5765 * whereas rb_event_ts_length may include the size of 5766 * one or two events. 5767 * We have already ensured there's enough space if this 5768 * is a time extend. 
/*
 * We only allocate new buffers, never free them if the CPU goes down.
 * If we were to free the buffer, then the user would lose any trace that was in
 * the buffer.
 */
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct trace_buffer *buffer;
        long nr_pages_same;
        int cpu_i;
        unsigned long nr_pages;

        buffer = container_of(node, struct trace_buffer, node);
        if (cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;

        nr_pages = 0;
        nr_pages_same = 1;
        /* check if all cpu sizes are the same */
        for_each_buffer_cpu(buffer, cpu_i) {
                /* fill in the size from the first enabled cpu */
                if (nr_pages == 0)
                        nr_pages = buffer->buffers[cpu_i]->nr_pages;
                if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
                        nr_pages_same = 0;
                        break;
                }
        }
        /* allocate minimum pages, user can later expand it */
        if (!nr_pages_same)
                nr_pages = 2;
        buffer->buffers[cpu] =
                rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
        if (!buffer->buffers[cpu]) {
                WARN(1, "failed to allocate ring buffer on CPU %u\n",
                     cpu);
                return -ENOMEM;
        }
        smp_wmb();
        cpumask_set_cpu(cpu, buffer->cpumask);
        return 0;
}
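/*
 * For context, a hedged sketch of how this callback is typically wired up
 * from the tracing core (the real call sites live outside this file):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *
 * after which each ring buffer registers itself as an instance:
 *
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 *
 * so trace_rb_cpu_prepare() runs whenever a CPU comes online.
 */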
#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
        struct trace_buffer *buffer;
        unsigned long events;
        unsigned long bytes_written;
        unsigned long bytes_alloc;
        unsigned long bytes_dropped;
        unsigned long events_nested;
        unsigned long bytes_written_nested;
        unsigned long bytes_alloc_nested;
        unsigned long bytes_dropped_nested;
        int min_size_nested;
        int max_size_nested;
        int max_size;
        int min_size;
        int cpu;
        int cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
        "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
        "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
        "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
        int size;
        char str[];
};
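/*
 * Each test event carries an rb_item: a length header followed by a
 * flexible array holding @size bytes copied from rb_string. The reserve
 * length is the header plus the payload; with the struct_size() helper
 * from <linux/overflow.h> this could also be written as (illustrative):
 *
 *	len = struct_size(item, str, size);
 *
 * which is equivalent to size + sizeof(struct rb_item) as used below.
 */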
static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
        struct ring_buffer_event *event;
        struct rb_item *item;
        bool started;
        int event_len;
        int size;
        int len;
        int cnt;

        /* Have nested writes different than what is written */
        cnt = data->cnt + (nested ? 27 : 0);

        /* Multiply cnt by ~e, to make some unique increment */
        size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

        len = size + sizeof(struct rb_item);

        started = rb_test_started;
        /* read rb_test_started before checking buffer enabled */
        smp_rmb();

        event = ring_buffer_lock_reserve(data->buffer, len);
        if (!event) {
                /* Ignore dropped events before test starts. */
                if (started) {
                        if (nested)
                                data->bytes_dropped_nested += len;
                        else
                                data->bytes_dropped += len;
                }
                return len;
        }

        event_len = ring_buffer_event_length(event);

        if (RB_WARN_ON(data->buffer, event_len < len))
                goto out;

        item = ring_buffer_event_data(event);
        item->size = size;
        memcpy(item->str, rb_string, size);

        if (nested) {
                data->bytes_alloc_nested += event_len;
                data->bytes_written_nested += len;
                data->events_nested++;
                if (!data->min_size_nested || len < data->min_size_nested)
                        data->min_size_nested = len;
                if (len > data->max_size_nested)
                        data->max_size_nested = len;
        } else {
                data->bytes_alloc += event_len;
                data->bytes_written += len;
                data->events++;
                if (!data->min_size || len < data->min_size)
                        data->min_size = len;
                if (len > data->max_size)
                        data->max_size = len;
        }

 out:
        ring_buffer_unlock_commit(data->buffer);

        return 0;
}

static __init int rb_test(void *arg)
{
        struct rb_test_data *data = arg;

        while (!kthread_should_stop()) {
                rb_write_something(data, false);
                data->cnt++;

                set_current_state(TASK_INTERRUPTIBLE);
                /* Now sleep between a min of 100-300us and a max of 1ms */
                usleep_range(((data->cnt % 3) + 1) * 100, 1000);
        }

        return 0;
}

static __init void rb_ipi(void *ignore)
{
        struct rb_test_data *data;
        int cpu = smp_processor_id();

        data = &rb_data[cpu];
        rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
        while (!kthread_should_stop()) {
                /* Send an IPI to all cpus to write data! */
                smp_call_function(rb_ipi, NULL, 1);
                /* No sleep, but for non-preempt, let others run */
                schedule();
        }

        return 0;
}
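/*
 * Note on the nesting coverage above: smp_call_function() with wait=1
 * runs rb_ipi() in interrupt context on every other CPU, so the
 * nested=true write in rb_ipi() lands on top of whatever rb_test() was
 * doing on that CPU at the time, exercising the ring buffer's
 * nested-commit path.
 */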
static __init int test_ringbuffer(void)
{
        struct task_struct *rb_hammer;
        struct trace_buffer *buffer;
        int cpu;
        int ret = 0;

        if (security_locked_down(LOCKDOWN_TRACEFS)) {
                pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
                return 0;
        }

        pr_info("Running ring buffer tests...\n");

        buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
        if (WARN_ON(!buffer))
                return 0;

        /* Disable the buffer so that threads can't write to it yet */
        ring_buffer_record_off(buffer);

        for_each_online_cpu(cpu) {
                rb_data[cpu].buffer = buffer;
                rb_data[cpu].cpu = cpu;
                rb_data[cpu].cnt = cpu;
                rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
                                                     cpu, "rbtester/%u");
                if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
                        pr_cont("FAILED\n");
                        ret = PTR_ERR(rb_threads[cpu]);
                        goto out_free;
                }
        }

        /* Now create the rb hammer! */
        rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
        if (WARN_ON(IS_ERR(rb_hammer))) {
                pr_cont("FAILED\n");
                ret = PTR_ERR(rb_hammer);
                goto out_free;
        }

        ring_buffer_record_on(buffer);
        /*
         * Show the buffer is enabled before setting rb_test_started.
         * Yes, there's a small race window where events could be
         * dropped and the thread won't catch it. But when a ring
         * buffer gets enabled, there will always be some kind of
         * delay before other CPUs see it. Thus, we don't care about
         * those dropped events. We care about events dropped after
         * the threads see that the buffer is active. (This smp_wmb()
         * pairs with the smp_rmb() in rb_write_something().)
         */
        smp_wmb();
        rb_test_started = true;

        set_current_state(TASK_INTERRUPTIBLE);
        /* Just run for 10 seconds */
        schedule_timeout(10 * HZ);

        kthread_stop(rb_hammer);

 out_free:
        for_each_online_cpu(cpu) {
                if (!rb_threads[cpu])
                        break;
                kthread_stop(rb_threads[cpu]);
        }
        if (ret) {
                ring_buffer_free(buffer);
                return ret;
        }

        /* Report! */
        pr_info("finished\n");
        for_each_online_cpu(cpu) {
                struct ring_buffer_event *event;
                struct rb_test_data *data = &rb_data[cpu];
                struct rb_item *item;
                unsigned long total_events;
                unsigned long total_dropped;
                unsigned long total_written;
                unsigned long total_alloc;
                unsigned long total_read = 0;
                unsigned long total_size = 0;
                unsigned long total_len = 0;
                unsigned long total_lost = 0;
                unsigned long lost;
                int big_event_size;
                int small_event_size;

                ret = -1;

                total_events = data->events + data->events_nested;
                total_written = data->bytes_written + data->bytes_written_nested;
                total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
                total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

                big_event_size = data->max_size + data->max_size_nested;
                small_event_size = data->min_size + data->min_size_nested;

                pr_info("CPU %d:\n", cpu);
                pr_info(" events: %ld\n", total_events);
                pr_info(" dropped bytes: %ld\n", total_dropped);
                pr_info(" alloced bytes: %ld\n", total_alloc);
                pr_info(" written bytes: %ld\n", total_written);
                pr_info(" biggest event: %d\n", big_event_size);
                pr_info(" smallest event: %d\n", small_event_size);

                if (RB_WARN_ON(buffer, total_dropped))
                        break;

                ret = 0;

                while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
                        total_lost += lost;
                        item = ring_buffer_event_data(event);
                        total_len += ring_buffer_event_length(event);
                        total_size += item->size + sizeof(struct rb_item);
                        if (memcmp(&item->str[0], rb_string, item->size) != 0) {
                                pr_info("FAILED!\n");
                                pr_info("buffer had: %.*s\n", item->size, item->str);
                                pr_info("expected: %.*s\n", item->size, rb_string);
                                RB_WARN_ON(buffer, 1);
                                ret = -1;
                                break;
                        }
                        total_read++;
                }
                if (ret)
                        break;

                ret = -1;

                pr_info(" read events: %ld\n", total_read);
                pr_info(" lost events: %ld\n", total_lost);
                pr_info(" total events: %ld\n", total_lost + total_read);
                pr_info(" recorded len bytes: %ld\n", total_len);
                pr_info(" recorded size bytes: %ld\n", total_size);
                if (total_lost) {
                        pr_info(" With dropped events, record len and size may not match\n"
                                " alloced and written from above\n");
                } else {
                        if (RB_WARN_ON(buffer, total_len != total_alloc ||
                                       total_size != total_written))
                                break;
                }
                if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
                        break;

                ret = 0;
        }
        if (!ret)
                pr_info("Ring buffer PASSED!\n");

        ring_buffer_free(buffer);
        return 0;
}

late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */