1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/kernel.h> 4 #include <linux/irqflags.h> 5 #include <linux/string.h> 6 #include <linux/errno.h> 7 #include <linux/bug.h> 8 #include "printk_ringbuffer.h" 9 10 /** 11 * DOC: printk_ringbuffer overview 12 * 13 * Data Structure 14 * -------------- 15 * The printk_ringbuffer is made up of 3 internal ringbuffers: 16 * 17 * desc_ring 18 * A ring of descriptors. A descriptor contains all record meta data 19 * (sequence number, timestamp, loglevel, etc.) as well as internal state 20 * information about the record and logical positions specifying where in 21 * the other ringbuffers the text and dictionary strings are located. 22 * 23 * text_data_ring 24 * A ring of data blocks. A data block consists of an unsigned long 25 * integer (ID) that maps to a desc_ring index followed by the text 26 * string of the record. 27 * 28 * dict_data_ring 29 * A ring of data blocks. A data block consists of an unsigned long 30 * integer (ID) that maps to a desc_ring index followed by the dictionary 31 * string of the record. 32 * 33 * The internal state information of a descriptor is the key element to allow 34 * readers and writers to locklessly synchronize access to the data. 35 * 36 * Implementation 37 * -------------- 38 * 39 * Descriptor Ring 40 * ~~~~~~~~~~~~~~~ 41 * The descriptor ring is an array of descriptors. A descriptor contains all 42 * the meta data of a printk record as well as blk_lpos structs pointing to 43 * associated text and dictionary data blocks (see "Data Rings" below). Each 44 * descriptor is assigned an ID that maps directly to index values of the 45 * descriptor array and has a state. The ID and the state are bitwise combined 46 * into a single descriptor field named @state_var, allowing ID and state to 47 * be synchronously and atomically updated. 48 * 49 * Descriptors have three states: 50 * 51 * reserved 52 * A writer is modifying the record. 53 * 54 * committed 55 * The record and all its data are complete and available for reading. 56 * 57 * reusable 58 * The record exists, but its text and/or dictionary data may no longer 59 * be available. 60 * 61 * Querying the @state_var of a record requires providing the ID of the 62 * descriptor to query. This can yield a possible fourth (pseudo) state: 63 * 64 * miss 65 * The descriptor being queried has an unexpected ID. 66 * 67 * The descriptor ring has a @tail_id that contains the ID of the oldest 68 * descriptor and @head_id that contains the ID of the newest descriptor. 69 * 70 * When a new descriptor should be created (and the ring is full), the tail 71 * descriptor is invalidated by first transitioning to the reusable state and 72 * then invalidating all tail data blocks up to and including the data blocks 73 * associated with the tail descriptor (for text and dictionary rings). Then 74 * @tail_id is advanced, followed by advancing @head_id. And finally the 75 * @state_var of the new descriptor is initialized to the new ID and reserved 76 * state. 77 * 78 * The @tail_id can only be advanced if the new @tail_id would be in the 79 * committed or reusable queried state. This makes it possible that a valid 80 * sequence number of the tail is always available. 81 * 82 * Data Rings 83 * ~~~~~~~~~~ 84 * The two data rings (text and dictionary) function identically. They exist 85 * separately so that their buffer sizes can be individually set and they do 86 * not affect one another. 87 * 88 * Data rings are byte arrays composed of data blocks. 
Data blocks are 89 * referenced by blk_lpos structs that point to the logical position of the 90 * beginning of a data block and the beginning of the next adjacent data 91 * block. Logical positions are mapped directly to index values of the byte 92 * array ringbuffer. 93 * 94 * Each data block consists of an ID followed by the writer data. The ID is 95 * the identifier of a descriptor that is associated with the data block. A 96 * given data block is considered valid if all of the following conditions 97 * are met: 98 * 99 * 1) The descriptor associated with the data block is in the committed 100 * queried state. 101 * 102 * 2) The blk_lpos struct within the descriptor associated with the data 103 * block references back to the same data block. 104 * 105 * 3) The data block is within the head/tail logical position range. 106 * 107 * If the writer data of a data block would extend beyond the end of the 108 * byte array, only the ID of the data block is stored at the logical 109 * position and the full data block (ID and writer data) is stored at the 110 * beginning of the byte array. The referencing blk_lpos will point to the 111 * ID before the wrap and the next data block will be at the logical 112 * position adjacent the full data block after the wrap. 113 * 114 * Data rings have a @tail_lpos that points to the beginning of the oldest 115 * data block and a @head_lpos that points to the logical position of the 116 * next (not yet existing) data block. 117 * 118 * When a new data block should be created (and the ring is full), tail data 119 * blocks will first be invalidated by putting their associated descriptors 120 * into the reusable state and then pushing the @tail_lpos forward beyond 121 * them. Then the @head_lpos is pushed forward and is associated with a new 122 * descriptor. If a data block is not valid, the @tail_lpos cannot be 123 * advanced beyond it. 124 * 125 * Usage 126 * ----- 127 * Here are some simple examples demonstrating writers and readers. For the 128 * examples a global ringbuffer (test_rb) is available (which is not the 129 * actual ringbuffer used by printk):: 130 * 131 * DEFINE_PRINTKRB(test_rb, 15, 5, 3); 132 * 133 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of 134 * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for 135 * dictionary data. 
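 *
 * As a sketch of the size arithmetic only (the variable names below are
 * illustrative, not taken from printk_ringbuffer.h)::
 *
 *	// 2nd argument: descriptor count bits,
 *	// 3rd/4th arguments: average text/dict size bits per record
 *	max_records    = 1 << 15;          // 32768
 *	text_ring_size = 1 << (15 + 5);    // 1 MiB
 *	dict_ring_size = 1 << (15 + 3);    // 256 KiB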
136 * 137 * Sample writer code:: 138 * 139 * const char *dictstr = "dictionary text"; 140 * const char *textstr = "message text"; 141 * struct prb_reserved_entry e; 142 * struct printk_record r; 143 * 144 * // specify how much to allocate 145 * prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1); 146 * 147 * if (prb_reserve(&e, &test_rb, &r)) { 148 * snprintf(r.text_buf, r.text_buf_size, "%s", textstr); 149 * 150 * // dictionary allocation may have failed 151 * if (r.dict_buf) 152 * snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr); 153 * 154 * r.info->ts_nsec = local_clock(); 155 * 156 * prb_commit(&e); 157 * } 158 * 159 * Sample reader code:: 160 * 161 * struct printk_info info; 162 * struct printk_record r; 163 * char text_buf[32]; 164 * char dict_buf[32]; 165 * u64 seq; 166 * 167 * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), 168 * &dict_buf[0], sizeof(dict_buf)); 169 * 170 * prb_for_each_record(0, &test_rb, &seq, &r) { 171 * if (info.seq != seq) 172 * pr_warn("lost %llu records\n", info.seq - seq); 173 * 174 * if (info.text_len > r.text_buf_size) { 175 * pr_warn("record %llu text truncated\n", info.seq); 176 * text_buf[r.text_buf_size - 1] = 0; 177 * } 178 * 179 * if (info.dict_len > r.dict_buf_size) { 180 * pr_warn("record %llu dict truncated\n", info.seq); 181 * dict_buf[r.dict_buf_size - 1] = 0; 182 * } 183 * 184 * pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec, 185 * &text_buf[0], info.dict_len ? &dict_buf[0] : ""); 186 * } 187 * 188 * Note that additional less convenient reader functions are available to 189 * allow complex record access. 190 * 191 * ABA Issues 192 * ~~~~~~~~~~ 193 * To help avoid ABA issues, descriptors are referenced by IDs (array index 194 * values combined with tagged bits counting array wraps) and data blocks are 195 * referenced by logical positions (array index values combined with tagged 196 * bits counting array wraps). However, on 32-bit systems the number of 197 * tagged bits is relatively small such that an ABA incident is (at least 198 * theoretically) possible. For example, if 4 million maximally sized (1KiB) 199 * printk messages were to occur in NMI context on a 32-bit system, the 200 * interrupted context would not be able to recognize that the 32-bit integer 201 * completely wrapped and thus represents a different data block than the one 202 * the interrupted context expects. 203 * 204 * To help combat this possibility, additional state checking is performed 205 * (such as using cmpxchg() even though set() would suffice). These extra 206 * checks are commented as such and will hopefully catch any ABA issue that 207 * a 32-bit system might experience. 208 * 209 * Memory Barriers 210 * ~~~~~~~~~~~~~~~ 211 * Multiple memory barriers are used. To simplify proving correctness and 212 * generating litmus tests, lines of code related to memory barriers 213 * (loads, stores, and the associated memory barriers) are labeled:: 214 * 215 * LMM(function:letter) 216 * 217 * Comments reference the labels using only the "function:letter" part. 
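 *
 * Purely as an illustration of the convention (not code from this
 * file), a labeled load and the barrier it relies on might appear as::
 *
 *	val = atomic_long_read(&var);   // LMM(foo:A)
 *	smp_rmb();                      // LMM(foo:B)
 *
 * and other comments would then refer to them simply as foo:A and foo:B.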
218 * 219 * The memory barrier pairs and their ordering are: 220 * 221 * desc_reserve:D / desc_reserve:B 222 * push descriptor tail (id), then push descriptor head (id) 223 * 224 * desc_reserve:D / data_push_tail:B 225 * push data tail (lpos), then set new descriptor reserved (state) 226 * 227 * desc_reserve:D / desc_push_tail:C 228 * push descriptor tail (id), then set new descriptor reserved (state) 229 * 230 * desc_reserve:D / prb_first_seq:C 231 * push descriptor tail (id), then set new descriptor reserved (state) 232 * 233 * desc_reserve:F / desc_read:D 234 * set new descriptor id and reserved (state), then allow writer changes 235 * 236 * data_alloc:A / desc_read:D 237 * set old descriptor reusable (state), then modify new data block area 238 * 239 * data_alloc:A / data_push_tail:B 240 * push data tail (lpos), then modify new data block area 241 * 242 * prb_commit:B / desc_read:B 243 * store writer changes, then set new descriptor committed (state) 244 * 245 * data_push_tail:D / data_push_tail:A 246 * set descriptor reusable (state), then push data tail (lpos) 247 * 248 * desc_push_tail:B / desc_reserve:D 249 * set descriptor reusable (state), then push descriptor tail (id) 250 */ 251 252 #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits) 253 #define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1) 254 255 #define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits) 256 #define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1) 257 258 /* Determine the data array index from a logical position. */ 259 #define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring)) 260 261 /* Determine the desc array index from an ID or sequence number. */ 262 #define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring)) 263 264 /* Determine how many times the data array has wrapped. */ 265 #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits) 266 267 /* Get the logical position at index 0 of the current wrap. */ 268 #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \ 269 ((lpos) & ~DATA_SIZE_MASK(data_ring)) 270 271 /* Get the ID for the same index of the previous wrap as the given ID. */ 272 #define DESC_ID_PREV_WRAP(desc_ring, id) \ 273 DESC_ID((id) - DESCS_COUNT(desc_ring)) 274 275 /* 276 * A data block: mapped directly to the beginning of the data block area 277 * specified as a logical position within the data ring. 278 * 279 * @id: the ID of the associated descriptor 280 * @data: the writer data 281 * 282 * Note that the size of a data block is only known by its associated 283 * descriptor. 284 */ 285 struct prb_data_block { 286 unsigned long id; 287 char data[0]; 288 }; 289 290 /* 291 * Return the descriptor associated with @n. @n can be either a 292 * descriptor ID or a sequence number. 293 */ 294 static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n) 295 { 296 return &desc_ring->descs[DESC_INDEX(desc_ring, n)]; 297 } 298 299 static struct prb_data_block *to_block(struct prb_data_ring *data_ring, 300 unsigned long begin_lpos) 301 { 302 return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)]; 303 } 304 305 /* 306 * Increase the data size to account for data block meta data plus any 307 * padding so that the adjacent data block is aligned on the ID size. 308 */ 309 static unsigned int to_blk_size(unsigned int size) 310 { 311 struct prb_data_block *db = NULL; 312 313 size += sizeof(*db); 314 size = ALIGN(size, sizeof(db->id)); 315 return size; 316 } 317 318 /* 319 * Sanity checker for reserve size. 
The ringbuffer code assumes that a data 320 * block does not exceed the maximum possible size that could fit within the 321 * ringbuffer. This function provides that basic size check so that the 322 * assumption is safe. 323 * 324 * Writers are also not allowed to write 0-sized (data-less) records. Such 325 * records are used only internally by the ringbuffer. 326 */ 327 static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size) 328 { 329 struct prb_data_block *db = NULL; 330 331 /* 332 * Writers are not allowed to write data-less records. Such records 333 * are used only internally by the ringbuffer to denote records where 334 * their data failed to allocate or have been lost. 335 */ 336 if (size == 0) 337 return false; 338 339 /* 340 * Ensure the alignment padded size could possibly fit in the data 341 * array. The largest possible data block must still leave room for 342 * at least the ID of the next block. 343 */ 344 size = to_blk_size(size); 345 if (size > DATA_SIZE(data_ring) - sizeof(db->id)) 346 return false; 347 348 return true; 349 } 350 351 /* The possible responses of a descriptor state-query. */ 352 enum desc_state { 353 desc_miss, /* ID mismatch */ 354 desc_reserved, /* reserved, in use by writer */ 355 desc_committed, /* committed, writer is done */ 356 desc_reusable, /* free, not yet used by any writer */ 357 }; 358 359 /* Query the state of a descriptor. */ 360 static enum desc_state get_desc_state(unsigned long id, 361 unsigned long state_val) 362 { 363 if (id != DESC_ID(state_val)) 364 return desc_miss; 365 366 if (state_val & DESC_REUSE_MASK) 367 return desc_reusable; 368 369 if (state_val & DESC_COMMITTED_MASK) 370 return desc_committed; 371 372 return desc_reserved; 373 } 374 375 /* 376 * Get a copy of a specified descriptor and its queried state. A descriptor 377 * that is not in the committed or reusable state must be considered garbage 378 * by the reader. 379 */ 380 static enum desc_state desc_read(struct prb_desc_ring *desc_ring, 381 unsigned long id, struct prb_desc *desc_out) 382 { 383 struct prb_desc *desc = to_desc(desc_ring, id); 384 atomic_long_t *state_var = &desc->state_var; 385 enum desc_state d_state; 386 unsigned long state_val; 387 388 /* Check the descriptor state. */ 389 state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */ 390 d_state = get_desc_state(id, state_val); 391 if (d_state != desc_committed && d_state != desc_reusable) 392 return d_state; 393 394 /* 395 * Guarantee the state is loaded before copying the descriptor 396 * content. This avoids copying obsolete descriptor content that might 397 * not apply to the descriptor state. This pairs with prb_commit:B. 398 * 399 * Memory barrier involvement: 400 * 401 * If desc_read:A reads from prb_commit:B, then desc_read:C reads 402 * from prb_commit:A. 403 * 404 * Relies on: 405 * 406 * WMB from prb_commit:A to prb_commit:B 407 * matching 408 * RMB from desc_read:A to desc_read:C 409 */ 410 smp_rmb(); /* LMM(desc_read:B) */ 411 412 /* 413 * Copy the descriptor data. The data is not valid until the 414 * state has been re-checked. 415 */ 416 memcpy(desc_out, desc, sizeof(*desc_out)); /* LMM(desc_read:C) */ 417 418 /* 419 * 1. Guarantee the descriptor content is loaded before re-checking 420 * the state. This avoids reading an obsolete descriptor state 421 * that may not apply to the copied content. This pairs with 422 * desc_reserve:F. 
423 * 424 * Memory barrier involvement: 425 * 426 * If desc_read:C reads from desc_reserve:G, then desc_read:E 427 * reads from desc_reserve:F. 428 * 429 * Relies on: 430 * 431 * WMB from desc_reserve:F to desc_reserve:G 432 * matching 433 * RMB from desc_read:C to desc_read:E 434 * 435 * 2. Guarantee the record data is loaded before re-checking the 436 * state. This avoids reading an obsolete descriptor state that may 437 * not apply to the copied data. This pairs with data_alloc:A. 438 * 439 * Memory barrier involvement: 440 * 441 * If copy_data:A reads from data_alloc:B, then desc_read:E 442 * reads from desc_make_reusable:A. 443 * 444 * Relies on: 445 * 446 * MB from desc_make_reusable:A to data_alloc:B 447 * matching 448 * RMB from desc_read:C to desc_read:E 449 * 450 * Note: desc_make_reusable:A and data_alloc:B can be different 451 * CPUs. However, the data_alloc:B CPU (which performs the 452 * full memory barrier) must have previously seen 453 * desc_make_reusable:A. 454 */ 455 smp_rmb(); /* LMM(desc_read:D) */ 456 457 /* Re-check the descriptor state. */ 458 state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */ 459 return get_desc_state(id, state_val); 460 } 461 462 /* 463 * Take a specified descriptor out of the committed state by attempting 464 * the transition from committed to reusable. Either this context or some 465 * other context will have been successful. 466 */ 467 static void desc_make_reusable(struct prb_desc_ring *desc_ring, 468 unsigned long id) 469 { 470 unsigned long val_committed = id | DESC_COMMITTED_MASK; 471 unsigned long val_reusable = val_committed | DESC_REUSE_MASK; 472 struct prb_desc *desc = to_desc(desc_ring, id); 473 atomic_long_t *state_var = &desc->state_var; 474 475 atomic_long_cmpxchg_relaxed(state_var, val_committed, 476 val_reusable); /* LMM(desc_make_reusable:A) */ 477 } 478 479 /* 480 * Given a data ring (text or dict), put the associated descriptor of each 481 * data block from @lpos_begin until @lpos_end into the reusable state. 482 * 483 * If there is any problem making the associated descriptor reusable, either 484 * the descriptor has not yet been committed or another writer context has 485 * already pushed the tail lpos past the problematic data block. Regardless, 486 * on error the caller can re-load the tail lpos to determine the situation. 487 */ 488 static bool data_make_reusable(struct printk_ringbuffer *rb, 489 struct prb_data_ring *data_ring, 490 unsigned long lpos_begin, 491 unsigned long lpos_end, 492 unsigned long *lpos_out) 493 { 494 struct prb_desc_ring *desc_ring = &rb->desc_ring; 495 struct prb_data_blk_lpos *blk_lpos; 496 struct prb_data_block *blk; 497 enum desc_state d_state; 498 struct prb_desc desc; 499 unsigned long id; 500 501 /* 502 * Using the provided @data_ring, point @blk_lpos to the correct 503 * blk_lpos within the local copy of the descriptor. 504 */ 505 if (data_ring == &rb->text_data_ring) 506 blk_lpos = &desc.text_blk_lpos; 507 else 508 blk_lpos = &desc.dict_blk_lpos; 509 510 /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */ 511 while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) { 512 blk = to_block(data_ring, lpos_begin); 513 514 /* 515 * Load the block ID from the data block. This is a data race 516 * against a writer that may have newly reserved this data 517 * area. If the loaded value matches a valid descriptor ID, 518 * the blk_lpos of that descriptor will be checked to make 519 * sure it points back to this data block. 
If the check fails, 520 * the data area has been recycled by another writer. 521 */ 522 id = blk->id; /* LMM(data_make_reusable:A) */ 523 524 d_state = desc_read(desc_ring, id, &desc); /* LMM(data_make_reusable:B) */ 525 526 switch (d_state) { 527 case desc_miss: 528 return false; 529 case desc_reserved: 530 return false; 531 case desc_committed: 532 /* 533 * This data block is invalid if the descriptor 534 * does not point back to it. 535 */ 536 if (blk_lpos->begin != lpos_begin) 537 return false; 538 desc_make_reusable(desc_ring, id); 539 break; 540 case desc_reusable: 541 /* 542 * This data block is invalid if the descriptor 543 * does not point back to it. 544 */ 545 if (blk_lpos->begin != lpos_begin) 546 return false; 547 break; 548 } 549 550 /* Advance @lpos_begin to the next data block. */ 551 lpos_begin = blk_lpos->next; 552 } 553 554 *lpos_out = lpos_begin; 555 return true; 556 } 557 558 /* 559 * Advance the data ring tail to at least @lpos. This function puts 560 * descriptors into the reusable state if the tail is pushed beyond 561 * their associated data block. 562 */ 563 static bool data_push_tail(struct printk_ringbuffer *rb, 564 struct prb_data_ring *data_ring, 565 unsigned long lpos) 566 { 567 unsigned long tail_lpos_new; 568 unsigned long tail_lpos; 569 unsigned long next_lpos; 570 571 /* If @lpos is not valid, there is nothing to do. */ 572 if (lpos == INVALID_LPOS) 573 return true; 574 575 /* 576 * Any descriptor states that have transitioned to reusable due to the 577 * data tail being pushed to this loaded value will be visible to this 578 * CPU. This pairs with data_push_tail:D. 579 * 580 * Memory barrier involvement: 581 * 582 * If data_push_tail:A reads from data_push_tail:D, then this CPU can 583 * see desc_make_reusable:A. 584 * 585 * Relies on: 586 * 587 * MB from desc_make_reusable:A to data_push_tail:D 588 * matches 589 * READFROM from data_push_tail:D to data_push_tail:A 590 * thus 591 * READFROM from desc_make_reusable:A to this CPU 592 */ 593 tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */ 594 595 /* 596 * Loop until the tail lpos is at or beyond @lpos. This condition 597 * may already be satisfied, resulting in no full memory barrier 598 * from data_push_tail:D being performed. However, since this CPU 599 * sees the new tail lpos, any descriptor states that transitioned to 600 * the reusable state must already be visible. 601 */ 602 while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) { 603 /* 604 * Make all descriptors reusable that are associated with 605 * data blocks before @lpos. 606 */ 607 if (!data_make_reusable(rb, data_ring, tail_lpos, lpos, 608 &next_lpos)) { 609 /* 610 * 1. Guarantee the block ID loaded in 611 * data_make_reusable() is performed before 612 * reloading the tail lpos. The failed 613 * data_make_reusable() may be due to a newly 614 * recycled data area causing the tail lpos to 615 * have been previously pushed. This pairs with 616 * data_alloc:A. 617 * 618 * Memory barrier involvement: 619 * 620 * If data_make_reusable:A reads from data_alloc:B, 621 * then data_push_tail:C reads from 622 * data_push_tail:D. 623 * 624 * Relies on: 625 * 626 * MB from data_push_tail:D to data_alloc:B 627 * matching 628 * RMB from data_make_reusable:A to 629 * data_push_tail:C 630 * 631 * Note: data_push_tail:D and data_alloc:B can be 632 * different CPUs. However, the data_alloc:B 633 * CPU (which performs the full memory 634 * barrier) must have previously seen 635 * data_push_tail:D. 636 * 637 * 2. 
Guarantee the descriptor state loaded in 638 * data_make_reusable() is performed before 639 * reloading the tail lpos. The failed 640 * data_make_reusable() may be due to a newly 641 * recycled descriptor causing the tail lpos to 642 * have been previously pushed. This pairs with 643 * desc_reserve:D. 644 * 645 * Memory barrier involvement: 646 * 647 * If data_make_reusable:B reads from 648 * desc_reserve:F, then data_push_tail:C reads 649 * from data_push_tail:D. 650 * 651 * Relies on: 652 * 653 * MB from data_push_tail:D to desc_reserve:F 654 * matching 655 * RMB from data_make_reusable:B to 656 * data_push_tail:C 657 * 658 * Note: data_push_tail:D and desc_reserve:F can 659 * be different CPUs. However, the 660 * desc_reserve:F CPU (which performs the 661 * full memory barrier) must have previously 662 * seen data_push_tail:D. 663 */ 664 smp_rmb(); /* LMM(data_push_tail:B) */ 665 666 tail_lpos_new = atomic_long_read(&data_ring->tail_lpos 667 ); /* LMM(data_push_tail:C) */ 668 if (tail_lpos_new == tail_lpos) 669 return false; 670 671 /* Another CPU pushed the tail. Try again. */ 672 tail_lpos = tail_lpos_new; 673 continue; 674 } 675 676 /* 677 * Guarantee any descriptor states that have transitioned to 678 * reusable are stored before pushing the tail lpos. A full 679 * memory barrier is needed since other CPUs may have made 680 * the descriptor states reusable. This pairs with 681 * data_push_tail:A. 682 */ 683 if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos, 684 next_lpos)) { /* LMM(data_push_tail:D) */ 685 break; 686 } 687 } 688 689 return true; 690 } 691 692 /* 693 * Advance the desc ring tail. This function advances the tail by one 694 * descriptor, thus invalidating the oldest descriptor. Before advancing 695 * the tail, the tail descriptor is made reusable and all data blocks up to 696 * and including the descriptor's data block are invalidated (i.e. the data 697 * ring tail is pushed past the data block of the descriptor being made 698 * reusable). 699 */ 700 static bool desc_push_tail(struct printk_ringbuffer *rb, 701 unsigned long tail_id) 702 { 703 struct prb_desc_ring *desc_ring = &rb->desc_ring; 704 enum desc_state d_state; 705 struct prb_desc desc; 706 707 d_state = desc_read(desc_ring, tail_id, &desc); 708 709 switch (d_state) { 710 case desc_miss: 711 /* 712 * If the ID is exactly 1 wrap behind the expected, it is 713 * in the process of being reserved by another writer and 714 * must be considered reserved. 715 */ 716 if (DESC_ID(atomic_long_read(&desc.state_var)) == 717 DESC_ID_PREV_WRAP(desc_ring, tail_id)) { 718 return false; 719 } 720 721 /* 722 * The ID has changed. Another writer must have pushed the 723 * tail and recycled the descriptor already. Success is 724 * returned because the caller is only interested in the 725 * specified tail being pushed, which it was. 726 */ 727 return true; 728 case desc_reserved: 729 return false; 730 case desc_committed: 731 desc_make_reusable(desc_ring, tail_id); 732 break; 733 case desc_reusable: 734 break; 735 } 736 737 /* 738 * Data blocks must be invalidated before their associated 739 * descriptor can be made available for recycling. Invalidating 740 * them later is not possible because there is no way to trust 741 * data blocks once their associated descriptor is gone. 
742 */ 743 744 if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next)) 745 return false; 746 if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next)) 747 return false; 748 749 /* 750 * Check the next descriptor after @tail_id before pushing the tail 751 * to it because the tail must always be in a committed or reusable 752 * state. The implementation of prb_first_seq() relies on this. 753 * 754 * A successful read implies that the next descriptor is less than or 755 * equal to @head_id so there is no risk of pushing the tail past the 756 * head. 757 */ 758 d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */ 759 760 if (d_state == desc_committed || d_state == desc_reusable) { 761 /* 762 * Guarantee any descriptor states that have transitioned to 763 * reusable are stored before pushing the tail ID. This allows 764 * verifying the recycled descriptor state. A full memory 765 * barrier is needed since other CPUs may have made the 766 * descriptor states reusable. This pairs with desc_reserve:D. 767 */ 768 atomic_long_cmpxchg(&desc_ring->tail_id, tail_id, 769 DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */ 770 } else { 771 /* 772 * Guarantee the last state load from desc_read() is before 773 * reloading @tail_id in order to see a new tail ID in the 774 * case that the descriptor has been recycled. This pairs 775 * with desc_reserve:D. 776 * 777 * Memory barrier involvement: 778 * 779 * If desc_push_tail:A reads from desc_reserve:F, then 780 * desc_push_tail:D reads from desc_push_tail:B. 781 * 782 * Relies on: 783 * 784 * MB from desc_push_tail:B to desc_reserve:F 785 * matching 786 * RMB from desc_push_tail:A to desc_push_tail:D 787 * 788 * Note: desc_push_tail:B and desc_reserve:F can be different 789 * CPUs. However, the desc_reserve:F CPU (which performs 790 * the full memory barrier) must have previously seen 791 * desc_push_tail:B. 792 */ 793 smp_rmb(); /* LMM(desc_push_tail:C) */ 794 795 /* 796 * Re-check the tail ID. The descriptor following @tail_id is 797 * not in an allowed tail state. But if the tail has since 798 * been moved by another CPU, then it does not matter. 799 */ 800 if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */ 801 return false; 802 } 803 804 return true; 805 } 806 807 /* Reserve a new descriptor, invalidating the oldest if necessary. */ 808 static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out) 809 { 810 struct prb_desc_ring *desc_ring = &rb->desc_ring; 811 unsigned long prev_state_val; 812 unsigned long id_prev_wrap; 813 struct prb_desc *desc; 814 unsigned long head_id; 815 unsigned long id; 816 817 head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */ 818 819 do { 820 desc = to_desc(desc_ring, head_id); 821 822 id = DESC_ID(head_id + 1); 823 id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id); 824 825 /* 826 * Guarantee the head ID is read before reading the tail ID. 827 * Since the tail ID is updated before the head ID, this 828 * guarantees that @id_prev_wrap is never ahead of the tail 829 * ID. This pairs with desc_reserve:D. 830 * 831 * Memory barrier involvement: 832 * 833 * If desc_reserve:A reads from desc_reserve:D, then 834 * desc_reserve:C reads from desc_push_tail:B. 835 * 836 * Relies on: 837 * 838 * MB from desc_push_tail:B to desc_reserve:D 839 * matching 840 * RMB from desc_reserve:A to desc_reserve:C 841 * 842 * Note: desc_push_tail:B and desc_reserve:D can be different 843 * CPUs. 
However, the desc_reserve:D CPU (which performs 844 * the full memory barrier) must have previously seen 845 * desc_push_tail:B. 846 */ 847 smp_rmb(); /* LMM(desc_reserve:B) */ 848 849 if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id 850 )) { /* LMM(desc_reserve:C) */ 851 /* 852 * Make space for the new descriptor by 853 * advancing the tail. 854 */ 855 if (!desc_push_tail(rb, id_prev_wrap)) 856 return false; 857 } 858 859 /* 860 * 1. Guarantee the tail ID is read before validating the 861 * recycled descriptor state. A read memory barrier is 862 * sufficient for this. This pairs with desc_push_tail:B. 863 * 864 * Memory barrier involvement: 865 * 866 * If desc_reserve:C reads from desc_push_tail:B, then 867 * desc_reserve:E reads from desc_make_reusable:A. 868 * 869 * Relies on: 870 * 871 * MB from desc_make_reusable:A to desc_push_tail:B 872 * matching 873 * RMB from desc_reserve:C to desc_reserve:E 874 * 875 * Note: desc_make_reusable:A and desc_push_tail:B can be 876 * different CPUs. However, the desc_push_tail:B CPU 877 * (which performs the full memory barrier) must have 878 * previously seen desc_make_reusable:A. 879 * 880 * 2. Guarantee the tail ID is stored before storing the head 881 * ID. This pairs with desc_reserve:B. 882 * 883 * 3. Guarantee any data ring tail changes are stored before 884 * recycling the descriptor. Data ring tail changes can 885 * happen via desc_push_tail()->data_push_tail(). A full 886 * memory barrier is needed since another CPU may have 887 * pushed the data ring tails. This pairs with 888 * data_push_tail:B. 889 * 890 * 4. Guarantee a new tail ID is stored before recycling the 891 * descriptor. A full memory barrier is needed since 892 * another CPU may have pushed the tail ID. This pairs 893 * with desc_push_tail:C and this also pairs with 894 * prb_first_seq:C. 895 */ 896 } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id, 897 id)); /* LMM(desc_reserve:D) */ 898 899 desc = to_desc(desc_ring, id); 900 901 /* 902 * If the descriptor has been recycled, verify the old state val. 903 * See "ABA Issues" about why this verification is performed. 904 */ 905 prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */ 906 if (prev_state_val && 907 prev_state_val != (id_prev_wrap | DESC_COMMITTED_MASK | DESC_REUSE_MASK)) { 908 WARN_ON_ONCE(1); 909 return false; 910 } 911 912 /* 913 * Assign the descriptor a new ID and set its state to reserved. 914 * See "ABA Issues" about why cmpxchg() instead of set() is used. 915 * 916 * Guarantee the new descriptor ID and state is stored before making 917 * any other changes. A write memory barrier is sufficient for this. 918 * This pairs with desc_read:D. 919 */ 920 if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val, 921 id | 0)) { /* LMM(desc_reserve:F) */ 922 WARN_ON_ONCE(1); 923 return false; 924 } 925 926 /* Now data in @desc can be modified: LMM(desc_reserve:G) */ 927 928 *id_out = id; 929 return true; 930 } 931 932 /* Determine the end of a data block. */ 933 static unsigned long get_next_lpos(struct prb_data_ring *data_ring, 934 unsigned long lpos, unsigned int size) 935 { 936 unsigned long begin_lpos; 937 unsigned long next_lpos; 938 939 begin_lpos = lpos; 940 next_lpos = lpos + size; 941 942 /* First check if the data block does not wrap. */ 943 if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos)) 944 return next_lpos; 945 946 /* Wrapping data blocks store their data at the beginning. 
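	 * As an illustrative example (numbers chosen only for this sketch):
	 * with DATA_SIZE() == 64, a block starting at lpos 60 with size 16
	 * gives next_lpos == 76, which lies in the next wrap, so the data
	 * is placed at index 0 of that wrap and 64 + 16 == 80 is returned.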
*/ 947 return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size); 948 } 949 950 /* 951 * Allocate a new data block, invalidating the oldest data block(s) 952 * if necessary. This function also associates the data block with 953 * a specified descriptor. 954 */ 955 static char *data_alloc(struct printk_ringbuffer *rb, 956 struct prb_data_ring *data_ring, unsigned int size, 957 struct prb_data_blk_lpos *blk_lpos, unsigned long id) 958 { 959 struct prb_data_block *blk; 960 unsigned long begin_lpos; 961 unsigned long next_lpos; 962 963 if (size == 0) { 964 /* Specify a data-less block. */ 965 blk_lpos->begin = INVALID_LPOS; 966 blk_lpos->next = INVALID_LPOS; 967 return NULL; 968 } 969 970 size = to_blk_size(size); 971 972 begin_lpos = atomic_long_read(&data_ring->head_lpos); 973 974 do { 975 next_lpos = get_next_lpos(data_ring, begin_lpos, size); 976 977 if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) { 978 /* Failed to allocate, specify a data-less block. */ 979 blk_lpos->begin = INVALID_LPOS; 980 blk_lpos->next = INVALID_LPOS; 981 return NULL; 982 } 983 984 /* 985 * 1. Guarantee any descriptor states that have transitioned 986 * to reusable are stored before modifying the newly 987 * allocated data area. A full memory barrier is needed 988 * since other CPUs may have made the descriptor states 989 * reusable. See data_push_tail:A about why the reusable 990 * states are visible. This pairs with desc_read:D. 991 * 992 * 2. Guarantee any updated tail lpos is stored before 993 * modifying the newly allocated data area. Another CPU may 994 * be in data_make_reusable() and is reading a block ID 995 * from this area. data_make_reusable() can handle reading 996 * a garbage block ID value, but then it must be able to 997 * load a new tail lpos. A full memory barrier is needed 998 * since other CPUs may have updated the tail lpos. This 999 * pairs with data_push_tail:B. 1000 */ 1001 } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos, 1002 next_lpos)); /* LMM(data_alloc:A) */ 1003 1004 blk = to_block(data_ring, begin_lpos); 1005 blk->id = id; /* LMM(data_alloc:B) */ 1006 1007 if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) { 1008 /* Wrapping data blocks store their data at the beginning. */ 1009 blk = to_block(data_ring, 0); 1010 1011 /* 1012 * Store the ID on the wrapped block for consistency. 1013 * The printk_ringbuffer does not actually use it. 1014 */ 1015 blk->id = id; 1016 } 1017 1018 blk_lpos->begin = begin_lpos; 1019 blk_lpos->next = next_lpos; 1020 1021 return &blk->data[0]; 1022 } 1023 1024 /* Return the number of bytes used by a data block. */ 1025 static unsigned int space_used(struct prb_data_ring *data_ring, 1026 struct prb_data_blk_lpos *blk_lpos) 1027 { 1028 if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { 1029 /* Data block does not wrap. */ 1030 return (DATA_INDEX(data_ring, blk_lpos->next) - 1031 DATA_INDEX(data_ring, blk_lpos->begin)); 1032 } 1033 1034 /* 1035 * For wrapping data blocks, the trailing (wasted) space is 1036 * also counted. 1037 */ 1038 return (DATA_INDEX(data_ring, blk_lpos->next) + 1039 DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin)); 1040 } 1041 1042 /** 1043 * prb_reserve() - Reserve space in the ringbuffer. 1044 * 1045 * @e: The entry structure to setup. 1046 * @rb: The ringbuffer to reserve data in. 1047 * @r: The record structure to allocate buffers for. 1048 * 1049 * This is the public function available to writers to reserve data. 
1050 * 1051 * The writer specifies the text and dict sizes to reserve by setting the 1052 * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries 1053 * are optional, so @dict_buf_size is allowed to be 0. To ensure proper 1054 * initialization of @r, prb_rec_init_wr() should be used. 1055 * 1056 * Context: Any context. Disables local interrupts on success. 1057 * Return: true if at least text data could be allocated, otherwise false. 1058 * 1059 * On success, the fields @info, @text_buf, @dict_buf of @r will be set by 1060 * this function and should be filled in by the writer before committing. Also 1061 * on success, prb_record_text_space() can be used on @e to query the actual 1062 * space used for the text data block. 1063 * 1064 * If the function fails to reserve dictionary space (but all else succeeded), 1065 * it will still report success. In that case @dict_buf is set to NULL and 1066 * @dict_buf_size is set to 0. Writers must check this before writing to 1067 * dictionary space. 1068 * 1069 * @info->text_len and @info->dict_len will already be set to @text_buf_size 1070 * and @dict_buf_size, respectively. If dictionary space reservation fails, 1071 * @info->dict_len is set to 0. 1072 */ 1073 bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, 1074 struct printk_record *r) 1075 { 1076 struct prb_desc_ring *desc_ring = &rb->desc_ring; 1077 struct prb_desc *d; 1078 unsigned long id; 1079 1080 if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) 1081 goto fail; 1082 1083 /* Records are allowed to not have dictionaries. */ 1084 if (r->dict_buf_size) { 1085 if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size)) 1086 goto fail; 1087 } 1088 1089 /* 1090 * Descriptors in the reserved state act as blockers to all further 1091 * reservations once the desc_ring has fully wrapped. Disable 1092 * interrupts during the reserve/commit window in order to minimize 1093 * the likelihood of this happening. 1094 */ 1095 local_irq_save(e->irqflags); 1096 1097 if (!desc_reserve(rb, &id)) { 1098 /* Descriptor reservation failures are tracked. */ 1099 atomic_long_inc(&rb->fail); 1100 local_irq_restore(e->irqflags); 1101 goto fail; 1102 } 1103 1104 d = to_desc(desc_ring, id); 1105 1106 /* 1107 * Set the @e fields here so that prb_commit() can be used if 1108 * text data allocation fails. 1109 */ 1110 e->rb = rb; 1111 e->id = id; 1112 1113 /* 1114 * Initialize the sequence number if it has "never been set". 1115 * Otherwise just increment it by a full wrap. 1116 * 1117 * @seq is considered "never been set" if it has a value of 0, 1118 * _except_ for @descs[0], which was specially setup by the ringbuffer 1119 * initializer and therefore is always considered as set. 1120 * 1121 * See the "Bootstrap" comment block in printk_ringbuffer.h for 1122 * details about how the initializer bootstraps the descriptors. 1123 */ 1124 if (d->info.seq == 0 && DESC_INDEX(desc_ring, id) != 0) 1125 d->info.seq = DESC_INDEX(desc_ring, id); 1126 else 1127 d->info.seq += DESCS_COUNT(desc_ring); 1128 1129 r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size, 1130 &d->text_blk_lpos, id); 1131 /* If text data allocation fails, a data-less record is committed. */ 1132 if (r->text_buf_size && !r->text_buf) { 1133 d->info.text_len = 0; 1134 d->info.dict_len = 0; 1135 prb_commit(e); 1136 /* prb_commit() re-enabled interrupts. 
*/ 1137 goto fail; 1138 } 1139 1140 r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size, 1141 &d->dict_blk_lpos, id); 1142 /* 1143 * If dict data allocation fails, the caller can still commit 1144 * text. But dictionary information will not be available. 1145 */ 1146 if (r->dict_buf_size && !r->dict_buf) 1147 r->dict_buf_size = 0; 1148 1149 r->info = &d->info; 1150 1151 /* Set default values for the sizes. */ 1152 d->info.text_len = r->text_buf_size; 1153 d->info.dict_len = r->dict_buf_size; 1154 1155 /* Record full text space used by record. */ 1156 e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); 1157 1158 return true; 1159 fail: 1160 /* Make it clear to the caller that the reserve failed. */ 1161 memset(r, 0, sizeof(*r)); 1162 return false; 1163 } 1164 1165 /** 1166 * prb_commit() - Commit (previously reserved) data to the ringbuffer. 1167 * 1168 * @e: The entry containing the reserved data information. 1169 * 1170 * This is the public function available to writers to commit data. 1171 * 1172 * Context: Any context. Enables local interrupts. 1173 */ 1174 void prb_commit(struct prb_reserved_entry *e) 1175 { 1176 struct prb_desc_ring *desc_ring = &e->rb->desc_ring; 1177 struct prb_desc *d = to_desc(desc_ring, e->id); 1178 unsigned long prev_state_val = e->id | 0; 1179 1180 /* Now the writer has finished all writing: LMM(prb_commit:A) */ 1181 1182 /* 1183 * Set the descriptor as committed. See "ABA Issues" about why 1184 * cmpxchg() instead of set() is used. 1185 * 1186 * Guarantee all record data is stored before the descriptor state 1187 * is stored as committed. A write memory barrier is sufficient for 1188 * this. This pairs with desc_read:B. 1189 */ 1190 if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, 1191 e->id | DESC_COMMITTED_MASK)) { /* LMM(prb_commit:B) */ 1192 WARN_ON_ONCE(1); 1193 } 1194 1195 /* Restore interrupts, the reserve/commit window is finished. */ 1196 local_irq_restore(e->irqflags); 1197 } 1198 1199 /* 1200 * Given @blk_lpos, return a pointer to the writer data from the data block 1201 * and calculate the size of the data part. A NULL pointer is returned if 1202 * @blk_lpos specifies values that could never be legal. 1203 * 1204 * This function (used by readers) performs strict validation on the lpos 1205 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 1206 * triggered if an internal error is detected. 1207 */ 1208 static char *get_data(struct prb_data_ring *data_ring, 1209 struct prb_data_blk_lpos *blk_lpos, 1210 unsigned int *data_size) 1211 { 1212 struct prb_data_block *db; 1213 1214 /* Data-less data block description. */ 1215 if (blk_lpos->begin == INVALID_LPOS && 1216 blk_lpos->next == INVALID_LPOS) { 1217 return NULL; 1218 } 1219 1220 /* Regular data block: @begin less than @next and in same wrap. */ 1221 if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && 1222 blk_lpos->begin < blk_lpos->next) { 1223 db = to_block(data_ring, blk_lpos->begin); 1224 *data_size = blk_lpos->next - blk_lpos->begin; 1225 1226 /* Wrapping data block: @begin is one wrap behind @next. */ 1227 } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == 1228 DATA_WRAPS(data_ring, blk_lpos->next)) { 1229 db = to_block(data_ring, 0); 1230 *data_size = DATA_INDEX(data_ring, blk_lpos->next); 1231 1232 /* Illegal block description. */ 1233 } else { 1234 WARN_ON_ONCE(1); 1235 return NULL; 1236 } 1237 1238 /* A valid data block will always be aligned to the ID size. 
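	 * (to_blk_size() pads all writer allocations to this alignment.)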
*/ 1239 if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) || 1240 WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) { 1241 return NULL; 1242 } 1243 1244 /* A valid data block will always have at least an ID. */ 1245 if (WARN_ON_ONCE(*data_size < sizeof(db->id))) 1246 return NULL; 1247 1248 /* Subtract block ID space from size to reflect data size. */ 1249 *data_size -= sizeof(db->id); 1250 1251 return &db->data[0]; 1252 } 1253 1254 /* 1255 * Count the number of lines in provided text. All text has at least 1 line 1256 * (even if @text_size is 0). Each '\n' processed is counted as an additional 1257 * line. 1258 */ 1259 static unsigned int count_lines(char *text, unsigned int text_size) 1260 { 1261 unsigned int next_size = text_size; 1262 unsigned int line_count = 1; 1263 char *next = text; 1264 1265 while (next_size) { 1266 next = memchr(next, '\n', next_size); 1267 if (!next) 1268 break; 1269 line_count++; 1270 next++; 1271 next_size = text_size - (next - text); 1272 } 1273 1274 return line_count; 1275 } 1276 1277 /* 1278 * Given @blk_lpos, copy an expected @len of data into the provided buffer. 1279 * If @line_count is provided, count the number of lines in the data. 1280 * 1281 * This function (used by readers) performs strict validation on the data 1282 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 1283 * triggered if an internal error is detected. 1284 */ 1285 static bool copy_data(struct prb_data_ring *data_ring, 1286 struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf, 1287 unsigned int buf_size, unsigned int *line_count) 1288 { 1289 unsigned int data_size; 1290 char *data; 1291 1292 /* Caller might not want any data. */ 1293 if ((!buf || !buf_size) && !line_count) 1294 return true; 1295 1296 data = get_data(data_ring, blk_lpos, &data_size); 1297 if (!data) 1298 return false; 1299 1300 /* 1301 * Actual cannot be less than expected. It can be more than expected 1302 * because of the trailing alignment padding. 1303 */ 1304 if (WARN_ON_ONCE(data_size < (unsigned int)len)) { 1305 pr_warn_once("wrong data size (%u, expecting %hu) for data: %.*s\n", 1306 data_size, len, data_size, data); 1307 return false; 1308 } 1309 1310 /* Caller interested in the line count? */ 1311 if (line_count) 1312 *line_count = count_lines(data, data_size); 1313 1314 /* Caller interested in the data content? */ 1315 if (!buf || !buf_size) 1316 return true; 1317 1318 data_size = min_t(u16, buf_size, len); 1319 1320 if (!WARN_ON_ONCE(!data_size)) 1321 memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */ 1322 return true; 1323 } 1324 1325 /* 1326 * This is an extended version of desc_read(). It gets a copy of a specified 1327 * descriptor. However, it also verifies that the record is committed and has 1328 * the sequence number @seq. On success, 0 is returned. 1329 * 1330 * Error return values: 1331 * -EINVAL: A committed record with sequence number @seq does not exist. 1332 * -ENOENT: A committed record with sequence number @seq exists, but its data 1333 * is not available. This is a valid record, so readers should 1334 * continue with the next record. 
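 *
 * See _prb_read_valid() for an example of a caller handling both error
 * values: it skips ahead on -ENOENT and stops (or catches up to the
 * ring's tail) on -EINVAL.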
1335 */ 1336 static int desc_read_committed_seq(struct prb_desc_ring *desc_ring, 1337 unsigned long id, u64 seq, 1338 struct prb_desc *desc_out) 1339 { 1340 struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos; 1341 enum desc_state d_state; 1342 1343 d_state = desc_read(desc_ring, id, desc_out); 1344 1345 /* 1346 * An unexpected @id (desc_miss) or @seq mismatch means the record 1347 * does not exist. A descriptor in the reserved state means the 1348 * record does not yet exist for the reader. 1349 */ 1350 if (d_state == desc_miss || 1351 d_state == desc_reserved || 1352 desc_out->info.seq != seq) { 1353 return -EINVAL; 1354 } 1355 1356 /* 1357 * A descriptor in the reusable state may no longer have its data 1358 * available; report it as a data-less record. Or the record may 1359 * actually be a data-less record. 1360 */ 1361 if (d_state == desc_reusable || 1362 (blk_lpos->begin == INVALID_LPOS && blk_lpos->next == INVALID_LPOS)) { 1363 return -ENOENT; 1364 } 1365 1366 return 0; 1367 } 1368 1369 /* 1370 * Copy the ringbuffer data from the record with @seq to the provided 1371 * @r buffer. On success, 0 is returned. 1372 * 1373 * See desc_read_committed_seq() for error return values. 1374 */ 1375 static int prb_read(struct printk_ringbuffer *rb, u64 seq, 1376 struct printk_record *r, unsigned int *line_count) 1377 { 1378 struct prb_desc_ring *desc_ring = &rb->desc_ring; 1379 struct prb_desc *rdesc = to_desc(desc_ring, seq); 1380 atomic_long_t *state_var = &rdesc->state_var; 1381 struct prb_desc desc; 1382 unsigned long id; 1383 int err; 1384 1385 /* Extract the ID, used to specify the descriptor to read. */ 1386 id = DESC_ID(atomic_long_read(state_var)); 1387 1388 /* Get a local copy of the correct descriptor (if available). */ 1389 err = desc_read_committed_seq(desc_ring, id, seq, &desc); 1390 1391 /* 1392 * If @r is NULL, the caller is only interested in the availability 1393 * of the record. 1394 */ 1395 if (err || !r) 1396 return err; 1397 1398 /* If requested, copy meta data. */ 1399 if (r->info) 1400 memcpy(r->info, &desc.info, sizeof(*(r->info))); 1401 1402 /* Copy text data. If it fails, this is a data-less record. */ 1403 if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, desc.info.text_len, 1404 r->text_buf, r->text_buf_size, line_count)) { 1405 return -ENOENT; 1406 } 1407 1408 /* 1409 * Copy dict data. Although this should not fail, dict data is not 1410 * important. So if it fails, modify the copied meta data to report 1411 * that there is no dict data, thus silently dropping the dict data. 1412 */ 1413 if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, desc.info.dict_len, 1414 r->dict_buf, r->dict_buf_size, NULL)) { 1415 if (r->info) 1416 r->info->dict_len = 0; 1417 } 1418 1419 /* Ensure the record is still committed and has the same @seq. */ 1420 return desc_read_committed_seq(desc_ring, id, seq, &desc); 1421 } 1422 1423 /* Get the sequence number of the tail descriptor. */ 1424 static u64 prb_first_seq(struct printk_ringbuffer *rb) 1425 { 1426 struct prb_desc_ring *desc_ring = &rb->desc_ring; 1427 enum desc_state d_state; 1428 struct prb_desc desc; 1429 unsigned long id; 1430 1431 for (;;) { 1432 id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */ 1433 1434 d_state = desc_read(desc_ring, id, &desc); /* LMM(prb_first_seq:B) */ 1435 1436 /* 1437 * This loop will not be infinite because the tail is 1438 * _always_ in the committed or reusable state. 
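		 * (desc_push_tail() only ever advances @tail_id onto a
		 * descriptor that is already in one of those two states.)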
1439 */ 1440 if (d_state == desc_committed || d_state == desc_reusable) 1441 break; 1442 1443 /* 1444 * Guarantee the last state load from desc_read() is before 1445 * reloading @tail_id in order to see a new tail in the case 1446 * that the descriptor has been recycled. This pairs with 1447 * desc_reserve:D. 1448 * 1449 * Memory barrier involvement: 1450 * 1451 * If prb_first_seq:B reads from desc_reserve:F, then 1452 * prb_first_seq:A reads from desc_push_tail:B. 1453 * 1454 * Relies on: 1455 * 1456 * MB from desc_push_tail:B to desc_reserve:F 1457 * matching 1458 * RMB prb_first_seq:B to prb_first_seq:A 1459 */ 1460 smp_rmb(); /* LMM(prb_first_seq:C) */ 1461 } 1462 1463 return desc.info.seq; 1464 } 1465 1466 /* 1467 * Non-blocking read of a record. Updates @seq to the last committed record 1468 * (which may have no data). 1469 * 1470 * See the description of prb_read_valid() and prb_read_valid_info() 1471 * for details. 1472 */ 1473 static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, 1474 struct printk_record *r, unsigned int *line_count) 1475 { 1476 u64 tail_seq; 1477 int err; 1478 1479 while ((err = prb_read(rb, *seq, r, line_count))) { 1480 tail_seq = prb_first_seq(rb); 1481 1482 if (*seq < tail_seq) { 1483 /* 1484 * Behind the tail. Catch up and try again. This 1485 * can happen for -ENOENT and -EINVAL cases. 1486 */ 1487 *seq = tail_seq; 1488 1489 } else if (err == -ENOENT) { 1490 /* Record exists, but no data available. Skip. */ 1491 (*seq)++; 1492 1493 } else { 1494 /* Non-existent/non-committed record. Must stop. */ 1495 return false; 1496 } 1497 } 1498 1499 return true; 1500 } 1501 1502 /** 1503 * prb_read_valid() - Non-blocking read of a requested record or (if gone) 1504 * the next available record. 1505 * 1506 * @rb: The ringbuffer to read from. 1507 * @seq: The sequence number of the record to read. 1508 * @r: A record data buffer to store the read record to. 1509 * 1510 * This is the public function available to readers to read a record. 1511 * 1512 * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be 1513 * filled in. Any of the buffer pointers can be set to NULL if the reader 1514 * is not interested in that data. To ensure proper initialization of @r, 1515 * prb_rec_init_rd() should be used. 1516 * 1517 * Context: Any context. 1518 * Return: true if a record was read, otherwise false. 1519 * 1520 * On success, the reader must check r->info.seq to see which record was 1521 * actually read. This allows the reader to detect dropped records. 1522 * 1523 * Failure means @seq refers to a not yet written record. 1524 */ 1525 bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, 1526 struct printk_record *r) 1527 { 1528 return _prb_read_valid(rb, &seq, r, NULL); 1529 } 1530 1531 /** 1532 * prb_read_valid_info() - Non-blocking read of meta data for a requested 1533 * record or (if gone) the next available record. 1534 * 1535 * @rb: The ringbuffer to read from. 1536 * @seq: The sequence number of the record to read. 1537 * @info: A buffer to store the read record meta data to. 1538 * @line_count: A buffer to store the number of lines in the record text. 1539 * 1540 * This is the public function available to readers to read only the 1541 * meta data of a record. 1542 * 1543 * The reader provides the @info, @line_count buffers to be filled in. 1544 * Either of the buffer pointers can be set to NULL if the reader is not 1545 * interested in that data. 1546 * 1547 * Context: Any context. 
1548 * Return: true if a record's meta data was read, otherwise false. 1549 * 1550 * On success, the reader must check info->seq to see which record meta data 1551 * was actually read. This allows the reader to detect dropped records. 1552 * 1553 * Failure means @seq refers to a not yet written record. 1554 */ 1555 bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq, 1556 struct printk_info *info, unsigned int *line_count) 1557 { 1558 struct printk_record r; 1559 1560 prb_rec_init_rd(&r, info, NULL, 0, NULL, 0); 1561 1562 return _prb_read_valid(rb, &seq, &r, line_count); 1563 } 1564 1565 /** 1566 * prb_first_valid_seq() - Get the sequence number of the oldest available 1567 * record. 1568 * 1569 * @rb: The ringbuffer to get the sequence number from. 1570 * 1571 * This is the public function available to readers to see what the 1572 * first/oldest valid sequence number is. 1573 * 1574 * This provides readers a starting point to begin iterating the ringbuffer. 1575 * 1576 * Context: Any context. 1577 * Return: The sequence number of the first/oldest record or, if the 1578 * ringbuffer is empty, 0 is returned. 1579 */ 1580 u64 prb_first_valid_seq(struct printk_ringbuffer *rb) 1581 { 1582 u64 seq = 0; 1583 1584 if (!_prb_read_valid(rb, &seq, NULL, NULL)) 1585 return 0; 1586 1587 return seq; 1588 } 1589 1590 /** 1591 * prb_next_seq() - Get the sequence number after the last available record. 1592 * 1593 * @rb: The ringbuffer to get the sequence number from. 1594 * 1595 * This is the public function available to readers to see what the next 1596 * newest sequence number available to readers will be. 1597 * 1598 * This provides readers a sequence number to jump to if all currently 1599 * available records should be skipped. 1600 * 1601 * Context: Any context. 1602 * Return: The sequence number of the next newest (not yet available) record 1603 * for readers. 1604 */ 1605 u64 prb_next_seq(struct printk_ringbuffer *rb) 1606 { 1607 u64 seq = 0; 1608 1609 /* Search forward from the oldest descriptor. */ 1610 while (_prb_read_valid(rb, &seq, NULL, NULL)) 1611 seq++; 1612 1613 return seq; 1614 } 1615 1616 /** 1617 * prb_init() - Initialize a ringbuffer to use provided external buffers. 1618 * 1619 * @rb: The ringbuffer to initialize. 1620 * @text_buf: The data buffer for text data. 1621 * @textbits: The size of @text_buf as a power-of-2 value. 1622 * @dict_buf: The data buffer for dictionary data. 1623 * @dictbits: The size of @dict_buf as a power-of-2 value. 1624 * @descs: The descriptor buffer for ringbuffer records. 1625 * @descbits: The count of @descs items as a power-of-2 value. 1626 * 1627 * This is the public function available to writers to setup a ringbuffer 1628 * during runtime using provided buffers. 1629 * 1630 * This must match the initialization of DEFINE_PRINTKRB(). 1631 * 1632 * Context: Any context. 
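 *
 * A minimal usage sketch (names and sizes here are illustrative only,
 * not taken from the kernel)::
 *
 *	static char example_text[1 << 12];
 *	static char example_dict[1 << 10];
 *	static struct prb_desc example_descs[1 << 5];
 *	static struct printk_ringbuffer example_rb;
 *
 *	// 2^5 descriptors, 2^12 bytes text data, 2^10 bytes dict data
 *	prb_init(&example_rb, &example_text[0], 12, &example_dict[0], 10,
 *		 &example_descs[0], 5);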
1633 */ 1634 void prb_init(struct printk_ringbuffer *rb, 1635 char *text_buf, unsigned int textbits, 1636 char *dict_buf, unsigned int dictbits, 1637 struct prb_desc *descs, unsigned int descbits) 1638 { 1639 memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0])); 1640 1641 rb->desc_ring.count_bits = descbits; 1642 rb->desc_ring.descs = descs; 1643 atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); 1644 atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); 1645 1646 rb->text_data_ring.size_bits = textbits; 1647 rb->text_data_ring.data = text_buf; 1648 atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits)); 1649 atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits)); 1650 1651 rb->dict_data_ring.size_bits = dictbits; 1652 rb->dict_data_ring.data = dict_buf; 1653 atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits)); 1654 atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits)); 1655 1656 atomic_long_set(&rb->fail, 0); 1657 1658 descs[0].info.seq = -(u64)_DESCS_COUNT(descbits); 1659 1660 descs[_DESCS_COUNT(descbits) - 1].info.seq = 0; 1661 atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits)); 1662 descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = INVALID_LPOS; 1663 descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = INVALID_LPOS; 1664 descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = INVALID_LPOS; 1665 descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = INVALID_LPOS; 1666 } 1667 1668 /** 1669 * prb_record_text_space() - Query the full actual used ringbuffer space for 1670 * the text data of a reserved entry. 1671 * 1672 * @e: The successfully reserved entry to query. 1673 * 1674 * This is the public function available to writers to see how much actual 1675 * space is used in the ringbuffer to store the text data of the specified 1676 * entry. 1677 * 1678 * This function is only valid if @e has been successfully reserved using 1679 * prb_reserve(). 1680 * 1681 * Context: Any context. 1682 * Return: The size in bytes used by the text data of the associated record. 1683 */ 1684 unsigned int prb_record_text_space(struct prb_reserved_entry *e) 1685 { 1686 return e->text_space; 1687 } 1688