1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/kernel.h> 4 #include <linux/irqflags.h> 5 #include <linux/string.h> 6 #include <linux/errno.h> 7 #include <linux/bug.h> 8 #include "printk_ringbuffer.h" 9 10 /** 11 * DOC: printk_ringbuffer overview 12 * 13 * Data Structure 14 * -------------- 15 * The printk_ringbuffer is made up of 3 internal ringbuffers: 16 * 17 * desc_ring 18 * A ring of descriptors. A descriptor contains all record meta data 19 * (sequence number, timestamp, loglevel, etc.) as well as internal state 20 * information about the record and logical positions specifying where in 21 * the other ringbuffers the text and dictionary strings are located. 22 * 23 * text_data_ring 24 * A ring of data blocks. A data block consists of an unsigned long 25 * integer (ID) that maps to a desc_ring index followed by the text 26 * string of the record. 27 * 28 * dict_data_ring 29 * A ring of data blocks. A data block consists of an unsigned long 30 * integer (ID) that maps to a desc_ring index followed by the dictionary 31 * string of the record. 32 * 33 * The internal state information of a descriptor is the key element to allow 34 * readers and writers to locklessly synchronize access to the data. 35 * 36 * Implementation 37 * -------------- 38 * 39 * Descriptor Ring 40 * ~~~~~~~~~~~~~~~ 41 * The descriptor ring is an array of descriptors. A descriptor contains all 42 * the meta data of a printk record as well as blk_lpos structs pointing to 43 * associated text and dictionary data blocks (see "Data Rings" below). Each 44 * descriptor is assigned an ID that maps directly to index values of the 45 * descriptor array and has a state. The ID and the state are bitwise combined 46 * into a single descriptor field named @state_var, allowing ID and state to 47 * be synchronously and atomically updated. 48 * 49 * Descriptors have three states: 50 * 51 * reserved 52 * A writer is modifying the record. 53 * 54 * committed 55 * The record and all its data are complete and available for reading. 56 * 57 * reusable 58 * The record exists, but its text and/or dictionary data may no longer 59 * be available. 60 * 61 * Querying the @state_var of a record requires providing the ID of the 62 * descriptor to query. This can yield a possible fourth (pseudo) state: 63 * 64 * miss 65 * The descriptor being queried has an unexpected ID. 66 * 67 * The descriptor ring has a @tail_id that contains the ID of the oldest 68 * descriptor and @head_id that contains the ID of the newest descriptor. 69 * 70 * When a new descriptor should be created (and the ring is full), the tail 71 * descriptor is invalidated by first transitioning to the reusable state and 72 * then invalidating all tail data blocks up to and including the data blocks 73 * associated with the tail descriptor (for text and dictionary rings). Then 74 * @tail_id is advanced, followed by advancing @head_id. And finally the 75 * @state_var of the new descriptor is initialized to the new ID and reserved 76 * state. 77 * 78 * The @tail_id can only be advanced if the new @tail_id would be in the 79 * committed or reusable queried state. This makes it possible that a valid 80 * sequence number of the tail is always available. 81 * 82 * Data Rings 83 * ~~~~~~~~~~ 84 * The two data rings (text and dictionary) function identically. They exist 85 * separately so that their buffer sizes can be individually set and they do 86 * not affect one another. 87 * 88 * Data rings are byte arrays composed of data blocks. 
Data blocks are 89 * referenced by blk_lpos structs that point to the logical position of the 90 * beginning of a data block and the beginning of the next adjacent data 91 * block. Logical positions are mapped directly to index values of the byte 92 * array ringbuffer. 93 * 94 * Each data block consists of an ID followed by the writer data. The ID is 95 * the identifier of a descriptor that is associated with the data block. A 96 * given data block is considered valid if all of the following conditions 97 * are met: 98 * 99 * 1) The descriptor associated with the data block is in the committed 100 * queried state. 101 * 102 * 2) The blk_lpos struct within the descriptor associated with the data 103 * block references back to the same data block. 104 * 105 * 3) The data block is within the head/tail logical position range. 106 * 107 * If the writer data of a data block would extend beyond the end of the 108 * byte array, only the ID of the data block is stored at the logical 109 * position and the full data block (ID and writer data) is stored at the 110 * beginning of the byte array. The referencing blk_lpos will point to the 111 * ID before the wrap and the next data block will be at the logical 112 * position adjacent the full data block after the wrap. 113 * 114 * Data rings have a @tail_lpos that points to the beginning of the oldest 115 * data block and a @head_lpos that points to the logical position of the 116 * next (not yet existing) data block. 117 * 118 * When a new data block should be created (and the ring is full), tail data 119 * blocks will first be invalidated by putting their associated descriptors 120 * into the reusable state and then pushing the @tail_lpos forward beyond 121 * them. Then the @head_lpos is pushed forward and is associated with a new 122 * descriptor. If a data block is not valid, the @tail_lpos cannot be 123 * advanced beyond it. 124 * 125 * Usage 126 * ----- 127 * Here are some simple examples demonstrating writers and readers. For the 128 * examples a global ringbuffer (test_rb) is available (which is not the 129 * actual ringbuffer used by printk):: 130 * 131 * DEFINE_PRINTKRB(test_rb, 15, 5, 3); 132 * 133 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of 134 * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for 135 * dictionary data. 
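 *
 * That is, the three numeric arguments of DEFINE_PRINTKRB() are power-of-2
 * values specifying the descriptor count and the average text and
 * dictionary data sizes per record, respectively.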
136 * 137 * Sample writer code:: 138 * 139 * const char *dictstr = "dictionary text"; 140 * const char *textstr = "message text"; 141 * struct prb_reserved_entry e; 142 * struct printk_record r; 143 * 144 * // specify how much to allocate 145 * prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1); 146 * 147 * if (prb_reserve(&e, &test_rb, &r)) { 148 * snprintf(r.text_buf, r.text_buf_size, "%s", textstr); 149 * 150 * // dictionary allocation may have failed 151 * if (r.dict_buf) 152 * snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr); 153 * 154 * r.info->ts_nsec = local_clock(); 155 * 156 * prb_commit(&e); 157 * } 158 * 159 * Sample reader code:: 160 * 161 * struct printk_info info; 162 * struct printk_record r; 163 * char text_buf[32]; 164 * char dict_buf[32]; 165 * u64 seq; 166 * 167 * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), 168 * &dict_buf[0], sizeof(dict_buf)); 169 * 170 * prb_for_each_record(0, &test_rb, &seq, &r) { 171 * if (info.seq != seq) 172 * pr_warn("lost %llu records\n", info.seq - seq); 173 * 174 * if (info.text_len > r.text_buf_size) { 175 * pr_warn("record %llu text truncated\n", info.seq); 176 * text_buf[r.text_buf_size - 1] = 0; 177 * } 178 * 179 * if (info.dict_len > r.dict_buf_size) { 180 * pr_warn("record %llu dict truncated\n", info.seq); 181 * dict_buf[r.dict_buf_size - 1] = 0; 182 * } 183 * 184 * pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec, 185 * &text_buf[0], info.dict_len ? &dict_buf[0] : ""); 186 * } 187 * 188 * Note that additional less convenient reader functions are available to 189 * allow complex record access. 190 * 191 * ABA Issues 192 * ~~~~~~~~~~ 193 * To help avoid ABA issues, descriptors are referenced by IDs (array index 194 * values combined with tagged bits counting array wraps) and data blocks are 195 * referenced by logical positions (array index values combined with tagged 196 * bits counting array wraps). However, on 32-bit systems the number of 197 * tagged bits is relatively small such that an ABA incident is (at least 198 * theoretically) possible. For example, if 4 million maximally sized (1KiB) 199 * printk messages were to occur in NMI context on a 32-bit system, the 200 * interrupted context would not be able to recognize that the 32-bit integer 201 * completely wrapped and thus represents a different data block than the one 202 * the interrupted context expects. 203 * 204 * To help combat this possibility, additional state checking is performed 205 * (such as using cmpxchg() even though set() would suffice). These extra 206 * checks are commented as such and will hopefully catch any ABA issue that 207 * a 32-bit system might experience. 208 * 209 * Memory Barriers 210 * ~~~~~~~~~~~~~~~ 211 * Multiple memory barriers are used. To simplify proving correctness and 212 * generating litmus tests, lines of code related to memory barriers 213 * (loads, stores, and the associated memory barriers) are labeled:: 214 * 215 * LMM(function:letter) 216 * 217 * Comments reference the labels using only the "function:letter" part. 
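 *
 * For example, the label::
 *
 *	LMM(data_push_tail:A)
 *
 * marks the load of the data ring tail in data_push_tail() and is
 * referenced in other comments simply as "data_push_tail:A".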
218 * 219 * The memory barrier pairs and their ordering are: 220 * 221 * desc_reserve:D / desc_reserve:B 222 * push descriptor tail (id), then push descriptor head (id) 223 * 224 * desc_reserve:D / data_push_tail:B 225 * push data tail (lpos), then set new descriptor reserved (state) 226 * 227 * desc_reserve:D / desc_push_tail:C 228 * push descriptor tail (id), then set new descriptor reserved (state) 229 * 230 * desc_reserve:D / prb_first_seq:C 231 * push descriptor tail (id), then set new descriptor reserved (state) 232 * 233 * desc_reserve:F / desc_read:D 234 * set new descriptor id and reserved (state), then allow writer changes 235 * 236 * data_alloc:A / desc_read:D 237 * set old descriptor reusable (state), then modify new data block area 238 * 239 * data_alloc:A / data_push_tail:B 240 * push data tail (lpos), then modify new data block area 241 * 242 * prb_commit:B / desc_read:B 243 * store writer changes, then set new descriptor committed (state) 244 * 245 * data_push_tail:D / data_push_tail:A 246 * set descriptor reusable (state), then push data tail (lpos) 247 * 248 * desc_push_tail:B / desc_reserve:D 249 * set descriptor reusable (state), then push descriptor tail (id) 250 */ 251 252 #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits) 253 #define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1) 254 255 #define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits) 256 #define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1) 257 258 /* Determine the data array index from a logical position. */ 259 #define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring)) 260 261 /* Determine the desc array index from an ID or sequence number. */ 262 #define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring)) 263 264 /* Determine how many times the data array has wrapped. */ 265 #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits) 266 267 /* Determine if a logical position refers to a data-less block. */ 268 #define LPOS_DATALESS(lpos) ((lpos) & 1UL) 269 270 /* Get the logical position at index 0 of the current wrap. */ 271 #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \ 272 ((lpos) & ~DATA_SIZE_MASK(data_ring)) 273 274 /* Get the ID for the same index of the previous wrap as the given ID. */ 275 #define DESC_ID_PREV_WRAP(desc_ring, id) \ 276 DESC_ID((id) - DESCS_COUNT(desc_ring)) 277 278 /* 279 * A data block: mapped directly to the beginning of the data block area 280 * specified as a logical position within the data ring. 281 * 282 * @id: the ID of the associated descriptor 283 * @data: the writer data 284 * 285 * Note that the size of a data block is only known by its associated 286 * descriptor. 287 */ 288 struct prb_data_block { 289 unsigned long id; 290 char data[0]; 291 }; 292 293 /* 294 * Return the descriptor associated with @n. @n can be either a 295 * descriptor ID or a sequence number. 296 */ 297 static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n) 298 { 299 return &desc_ring->descs[DESC_INDEX(desc_ring, n)]; 300 } 301 302 static struct prb_data_block *to_block(struct prb_data_ring *data_ring, 303 unsigned long begin_lpos) 304 { 305 return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)]; 306 } 307 308 /* 309 * Increase the data size to account for data block meta data plus any 310 * padding so that the adjacent data block is aligned on the ID size. 
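 *
 * For example, with 8-byte unsigned longs, a requested size of 13 bytes
 * grows by 8 bytes (for the block ID) to 21 bytes and is then padded to
 * 24 bytes so that the next data block stays aligned on the ID size.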
311 */ 312 static unsigned int to_blk_size(unsigned int size) 313 { 314 struct prb_data_block *db = NULL; 315 316 size += sizeof(*db); 317 size = ALIGN(size, sizeof(db->id)); 318 return size; 319 } 320 321 /* 322 * Sanity checker for reserve size. The ringbuffer code assumes that a data 323 * block does not exceed the maximum possible size that could fit within the 324 * ringbuffer. This function provides that basic size check so that the 325 * assumption is safe. 326 */ 327 static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size) 328 { 329 struct prb_data_block *db = NULL; 330 331 if (size == 0) 332 return true; 333 334 /* 335 * Ensure the alignment padded size could possibly fit in the data 336 * array. The largest possible data block must still leave room for 337 * at least the ID of the next block. 338 */ 339 size = to_blk_size(size); 340 if (size > DATA_SIZE(data_ring) - sizeof(db->id)) 341 return false; 342 343 return true; 344 } 345 346 /* The possible responses of a descriptor state-query. */ 347 enum desc_state { 348 desc_miss, /* ID mismatch */ 349 desc_reserved, /* reserved, in use by writer */ 350 desc_committed, /* committed, writer is done */ 351 desc_reusable, /* free, not yet used by any writer */ 352 }; 353 354 /* Query the state of a descriptor. */ 355 static enum desc_state get_desc_state(unsigned long id, 356 unsigned long state_val) 357 { 358 if (id != DESC_ID(state_val)) 359 return desc_miss; 360 361 if (state_val & DESC_REUSE_MASK) 362 return desc_reusable; 363 364 if (state_val & DESC_COMMITTED_MASK) 365 return desc_committed; 366 367 return desc_reserved; 368 } 369 370 /* 371 * Get a copy of a specified descriptor and return its queried state. If the 372 * descriptor is in an inconsistent state (miss or reserved), the caller can 373 * only expect the descriptor's @state_var field to be valid. 374 */ 375 static enum desc_state desc_read(struct prb_desc_ring *desc_ring, 376 unsigned long id, struct prb_desc *desc_out) 377 { 378 struct prb_desc *desc = to_desc(desc_ring, id); 379 atomic_long_t *state_var = &desc->state_var; 380 enum desc_state d_state; 381 unsigned long state_val; 382 383 /* Check the descriptor state. */ 384 state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */ 385 d_state = get_desc_state(id, state_val); 386 if (d_state == desc_miss || d_state == desc_reserved) { 387 /* 388 * The descriptor is in an inconsistent state. Set at least 389 * @state_var so that the caller can see the details of 390 * the inconsistent state. 391 */ 392 goto out; 393 } 394 395 /* 396 * Guarantee the state is loaded before copying the descriptor 397 * content. This avoids copying obsolete descriptor content that might 398 * not apply to the descriptor state. This pairs with prb_commit:B. 399 * 400 * Memory barrier involvement: 401 * 402 * If desc_read:A reads from prb_commit:B, then desc_read:C reads 403 * from prb_commit:A. 404 * 405 * Relies on: 406 * 407 * WMB from prb_commit:A to prb_commit:B 408 * matching 409 * RMB from desc_read:A to desc_read:C 410 */ 411 smp_rmb(); /* LMM(desc_read:B) */ 412 413 /* 414 * Copy the descriptor data. The data is not valid until the 415 * state has been re-checked. A memcpy() for all of @desc 416 * cannot be used because of the atomic_t @state_var field. 
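	 * Note that @state_var itself is not copied here; it is stored into
	 * @desc_out at the end of this function from the most recently
	 * loaded state value.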
417 */ 418 memcpy(&desc_out->info, &desc->info, sizeof(desc_out->info)); /* LMM(desc_read:C) */ 419 memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos, 420 sizeof(desc_out->text_blk_lpos)); /* also part of desc_read:C */ 421 memcpy(&desc_out->dict_blk_lpos, &desc->dict_blk_lpos, 422 sizeof(desc_out->dict_blk_lpos)); /* also part of desc_read:C */ 423 424 /* 425 * 1. Guarantee the descriptor content is loaded before re-checking 426 * the state. This avoids reading an obsolete descriptor state 427 * that may not apply to the copied content. This pairs with 428 * desc_reserve:F. 429 * 430 * Memory barrier involvement: 431 * 432 * If desc_read:C reads from desc_reserve:G, then desc_read:E 433 * reads from desc_reserve:F. 434 * 435 * Relies on: 436 * 437 * WMB from desc_reserve:F to desc_reserve:G 438 * matching 439 * RMB from desc_read:C to desc_read:E 440 * 441 * 2. Guarantee the record data is loaded before re-checking the 442 * state. This avoids reading an obsolete descriptor state that may 443 * not apply to the copied data. This pairs with data_alloc:A. 444 * 445 * Memory barrier involvement: 446 * 447 * If copy_data:A reads from data_alloc:B, then desc_read:E 448 * reads from desc_make_reusable:A. 449 * 450 * Relies on: 451 * 452 * MB from desc_make_reusable:A to data_alloc:B 453 * matching 454 * RMB from desc_read:C to desc_read:E 455 * 456 * Note: desc_make_reusable:A and data_alloc:B can be different 457 * CPUs. However, the data_alloc:B CPU (which performs the 458 * full memory barrier) must have previously seen 459 * desc_make_reusable:A. 460 */ 461 smp_rmb(); /* LMM(desc_read:D) */ 462 463 /* 464 * The data has been copied. Return the current descriptor state, 465 * which may have changed since the load above. 466 */ 467 state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */ 468 d_state = get_desc_state(id, state_val); 469 out: 470 atomic_long_set(&desc_out->state_var, state_val); 471 return d_state; 472 } 473 474 /* 475 * Take a specified descriptor out of the committed state by attempting 476 * the transition from committed to reusable. Either this context or some 477 * other context will have been successful. 478 */ 479 static void desc_make_reusable(struct prb_desc_ring *desc_ring, 480 unsigned long id) 481 { 482 unsigned long val_committed = id | DESC_COMMITTED_MASK; 483 unsigned long val_reusable = val_committed | DESC_REUSE_MASK; 484 struct prb_desc *desc = to_desc(desc_ring, id); 485 atomic_long_t *state_var = &desc->state_var; 486 487 atomic_long_cmpxchg_relaxed(state_var, val_committed, 488 val_reusable); /* LMM(desc_make_reusable:A) */ 489 } 490 491 /* 492 * Given a data ring (text or dict), put the associated descriptor of each 493 * data block from @lpos_begin until @lpos_end into the reusable state. 494 * 495 * If there is any problem making the associated descriptor reusable, either 496 * the descriptor has not yet been committed or another writer context has 497 * already pushed the tail lpos past the problematic data block. Regardless, 498 * on error the caller can re-load the tail lpos to determine the situation. 
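 *
 * Returns true if the descriptors of all data blocks from @lpos_begin up
 * to @lpos_end are in the reusable state (with @lpos_out set just past the
 * last such data block), otherwise false.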
499 */ 500 static bool data_make_reusable(struct printk_ringbuffer *rb, 501 struct prb_data_ring *data_ring, 502 unsigned long lpos_begin, 503 unsigned long lpos_end, 504 unsigned long *lpos_out) 505 { 506 struct prb_desc_ring *desc_ring = &rb->desc_ring; 507 struct prb_data_blk_lpos *blk_lpos; 508 struct prb_data_block *blk; 509 enum desc_state d_state; 510 struct prb_desc desc; 511 unsigned long id; 512 513 /* 514 * Using the provided @data_ring, point @blk_lpos to the correct 515 * blk_lpos within the local copy of the descriptor. 516 */ 517 if (data_ring == &rb->text_data_ring) 518 blk_lpos = &desc.text_blk_lpos; 519 else 520 blk_lpos = &desc.dict_blk_lpos; 521 522 /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */ 523 while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) { 524 blk = to_block(data_ring, lpos_begin); 525 526 /* 527 * Load the block ID from the data block. This is a data race 528 * against a writer that may have newly reserved this data 529 * area. If the loaded value matches a valid descriptor ID, 530 * the blk_lpos of that descriptor will be checked to make 531 * sure it points back to this data block. If the check fails, 532 * the data area has been recycled by another writer. 533 */ 534 id = blk->id; /* LMM(data_make_reusable:A) */ 535 536 d_state = desc_read(desc_ring, id, &desc); /* LMM(data_make_reusable:B) */ 537 538 switch (d_state) { 539 case desc_miss: 540 return false; 541 case desc_reserved: 542 return false; 543 case desc_committed: 544 /* 545 * This data block is invalid if the descriptor 546 * does not point back to it. 547 */ 548 if (blk_lpos->begin != lpos_begin) 549 return false; 550 desc_make_reusable(desc_ring, id); 551 break; 552 case desc_reusable: 553 /* 554 * This data block is invalid if the descriptor 555 * does not point back to it. 556 */ 557 if (blk_lpos->begin != lpos_begin) 558 return false; 559 break; 560 } 561 562 /* Advance @lpos_begin to the next data block. */ 563 lpos_begin = blk_lpos->next; 564 } 565 566 *lpos_out = lpos_begin; 567 return true; 568 } 569 570 /* 571 * Advance the data ring tail to at least @lpos. This function puts 572 * descriptors into the reusable state if the tail is pushed beyond 573 * their associated data block. 574 */ 575 static bool data_push_tail(struct printk_ringbuffer *rb, 576 struct prb_data_ring *data_ring, 577 unsigned long lpos) 578 { 579 unsigned long tail_lpos_new; 580 unsigned long tail_lpos; 581 unsigned long next_lpos; 582 583 /* If @lpos is from a data-less block, there is nothing to do. */ 584 if (LPOS_DATALESS(lpos)) 585 return true; 586 587 /* 588 * Any descriptor states that have transitioned to reusable due to the 589 * data tail being pushed to this loaded value will be visible to this 590 * CPU. This pairs with data_push_tail:D. 591 * 592 * Memory barrier involvement: 593 * 594 * If data_push_tail:A reads from data_push_tail:D, then this CPU can 595 * see desc_make_reusable:A. 596 * 597 * Relies on: 598 * 599 * MB from desc_make_reusable:A to data_push_tail:D 600 * matches 601 * READFROM from data_push_tail:D to data_push_tail:A 602 * thus 603 * READFROM from desc_make_reusable:A to this CPU 604 */ 605 tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */ 606 607 /* 608 * Loop until the tail lpos is at or beyond @lpos. This condition 609 * may already be satisfied, resulting in no full memory barrier 610 * from data_push_tail:D being performed. 
However, since this CPU 611 * sees the new tail lpos, any descriptor states that transitioned to 612 * the reusable state must already be visible. 613 */ 614 while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) { 615 /* 616 * Make all descriptors reusable that are associated with 617 * data blocks before @lpos. 618 */ 619 if (!data_make_reusable(rb, data_ring, tail_lpos, lpos, 620 &next_lpos)) { 621 /* 622 * 1. Guarantee the block ID loaded in 623 * data_make_reusable() is performed before 624 * reloading the tail lpos. The failed 625 * data_make_reusable() may be due to a newly 626 * recycled data area causing the tail lpos to 627 * have been previously pushed. This pairs with 628 * data_alloc:A. 629 * 630 * Memory barrier involvement: 631 * 632 * If data_make_reusable:A reads from data_alloc:B, 633 * then data_push_tail:C reads from 634 * data_push_tail:D. 635 * 636 * Relies on: 637 * 638 * MB from data_push_tail:D to data_alloc:B 639 * matching 640 * RMB from data_make_reusable:A to 641 * data_push_tail:C 642 * 643 * Note: data_push_tail:D and data_alloc:B can be 644 * different CPUs. However, the data_alloc:B 645 * CPU (which performs the full memory 646 * barrier) must have previously seen 647 * data_push_tail:D. 648 * 649 * 2. Guarantee the descriptor state loaded in 650 * data_make_reusable() is performed before 651 * reloading the tail lpos. The failed 652 * data_make_reusable() may be due to a newly 653 * recycled descriptor causing the tail lpos to 654 * have been previously pushed. This pairs with 655 * desc_reserve:D. 656 * 657 * Memory barrier involvement: 658 * 659 * If data_make_reusable:B reads from 660 * desc_reserve:F, then data_push_tail:C reads 661 * from data_push_tail:D. 662 * 663 * Relies on: 664 * 665 * MB from data_push_tail:D to desc_reserve:F 666 * matching 667 * RMB from data_make_reusable:B to 668 * data_push_tail:C 669 * 670 * Note: data_push_tail:D and desc_reserve:F can 671 * be different CPUs. However, the 672 * desc_reserve:F CPU (which performs the 673 * full memory barrier) must have previously 674 * seen data_push_tail:D. 675 */ 676 smp_rmb(); /* LMM(data_push_tail:B) */ 677 678 tail_lpos_new = atomic_long_read(&data_ring->tail_lpos 679 ); /* LMM(data_push_tail:C) */ 680 if (tail_lpos_new == tail_lpos) 681 return false; 682 683 /* Another CPU pushed the tail. Try again. */ 684 tail_lpos = tail_lpos_new; 685 continue; 686 } 687 688 /* 689 * Guarantee any descriptor states that have transitioned to 690 * reusable are stored before pushing the tail lpos. A full 691 * memory barrier is needed since other CPUs may have made 692 * the descriptor states reusable. This pairs with 693 * data_push_tail:A. 694 */ 695 if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos, 696 next_lpos)) { /* LMM(data_push_tail:D) */ 697 break; 698 } 699 } 700 701 return true; 702 } 703 704 /* 705 * Advance the desc ring tail. This function advances the tail by one 706 * descriptor, thus invalidating the oldest descriptor. Before advancing 707 * the tail, the tail descriptor is made reusable and all data blocks up to 708 * and including the descriptor's data block are invalidated (i.e. the data 709 * ring tail is pushed past the data block of the descriptor being made 710 * reusable). 
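 *
 * Returns true if the tail is now beyond @tail_id (pushed by this or some
 * other context), otherwise false.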
711 */ 712 static bool desc_push_tail(struct printk_ringbuffer *rb, 713 unsigned long tail_id) 714 { 715 struct prb_desc_ring *desc_ring = &rb->desc_ring; 716 enum desc_state d_state; 717 struct prb_desc desc; 718 719 d_state = desc_read(desc_ring, tail_id, &desc); 720 721 switch (d_state) { 722 case desc_miss: 723 /* 724 * If the ID is exactly 1 wrap behind the expected, it is 725 * in the process of being reserved by another writer and 726 * must be considered reserved. 727 */ 728 if (DESC_ID(atomic_long_read(&desc.state_var)) == 729 DESC_ID_PREV_WRAP(desc_ring, tail_id)) { 730 return false; 731 } 732 733 /* 734 * The ID has changed. Another writer must have pushed the 735 * tail and recycled the descriptor already. Success is 736 * returned because the caller is only interested in the 737 * specified tail being pushed, which it was. 738 */ 739 return true; 740 case desc_reserved: 741 return false; 742 case desc_committed: 743 desc_make_reusable(desc_ring, tail_id); 744 break; 745 case desc_reusable: 746 break; 747 } 748 749 /* 750 * Data blocks must be invalidated before their associated 751 * descriptor can be made available for recycling. Invalidating 752 * them later is not possible because there is no way to trust 753 * data blocks once their associated descriptor is gone. 754 */ 755 756 if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next)) 757 return false; 758 if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next)) 759 return false; 760 761 /* 762 * Check the next descriptor after @tail_id before pushing the tail 763 * to it because the tail must always be in a committed or reusable 764 * state. The implementation of prb_first_seq() relies on this. 765 * 766 * A successful read implies that the next descriptor is less than or 767 * equal to @head_id so there is no risk of pushing the tail past the 768 * head. 769 */ 770 d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */ 771 772 if (d_state == desc_committed || d_state == desc_reusable) { 773 /* 774 * Guarantee any descriptor states that have transitioned to 775 * reusable are stored before pushing the tail ID. This allows 776 * verifying the recycled descriptor state. A full memory 777 * barrier is needed since other CPUs may have made the 778 * descriptor states reusable. This pairs with desc_reserve:D. 779 */ 780 atomic_long_cmpxchg(&desc_ring->tail_id, tail_id, 781 DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */ 782 } else { 783 /* 784 * Guarantee the last state load from desc_read() is before 785 * reloading @tail_id in order to see a new tail ID in the 786 * case that the descriptor has been recycled. This pairs 787 * with desc_reserve:D. 788 * 789 * Memory barrier involvement: 790 * 791 * If desc_push_tail:A reads from desc_reserve:F, then 792 * desc_push_tail:D reads from desc_push_tail:B. 793 * 794 * Relies on: 795 * 796 * MB from desc_push_tail:B to desc_reserve:F 797 * matching 798 * RMB from desc_push_tail:A to desc_push_tail:D 799 * 800 * Note: desc_push_tail:B and desc_reserve:F can be different 801 * CPUs. However, the desc_reserve:F CPU (which performs 802 * the full memory barrier) must have previously seen 803 * desc_push_tail:B. 804 */ 805 smp_rmb(); /* LMM(desc_push_tail:C) */ 806 807 /* 808 * Re-check the tail ID. The descriptor following @tail_id is 809 * not in an allowed tail state. But if the tail has since 810 * been moved by another CPU, then it does not matter. 
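		 * A changed tail means another context already pushed it
		 * past @tail_id, so success can still be reported.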
811 */ 812 if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */ 813 return false; 814 } 815 816 return true; 817 } 818 819 /* Reserve a new descriptor, invalidating the oldest if necessary. */ 820 static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out) 821 { 822 struct prb_desc_ring *desc_ring = &rb->desc_ring; 823 unsigned long prev_state_val; 824 unsigned long id_prev_wrap; 825 struct prb_desc *desc; 826 unsigned long head_id; 827 unsigned long id; 828 829 head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */ 830 831 do { 832 desc = to_desc(desc_ring, head_id); 833 834 id = DESC_ID(head_id + 1); 835 id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id); 836 837 /* 838 * Guarantee the head ID is read before reading the tail ID. 839 * Since the tail ID is updated before the head ID, this 840 * guarantees that @id_prev_wrap is never ahead of the tail 841 * ID. This pairs with desc_reserve:D. 842 * 843 * Memory barrier involvement: 844 * 845 * If desc_reserve:A reads from desc_reserve:D, then 846 * desc_reserve:C reads from desc_push_tail:B. 847 * 848 * Relies on: 849 * 850 * MB from desc_push_tail:B to desc_reserve:D 851 * matching 852 * RMB from desc_reserve:A to desc_reserve:C 853 * 854 * Note: desc_push_tail:B and desc_reserve:D can be different 855 * CPUs. However, the desc_reserve:D CPU (which performs 856 * the full memory barrier) must have previously seen 857 * desc_push_tail:B. 858 */ 859 smp_rmb(); /* LMM(desc_reserve:B) */ 860 861 if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id 862 )) { /* LMM(desc_reserve:C) */ 863 /* 864 * Make space for the new descriptor by 865 * advancing the tail. 866 */ 867 if (!desc_push_tail(rb, id_prev_wrap)) 868 return false; 869 } 870 871 /* 872 * 1. Guarantee the tail ID is read before validating the 873 * recycled descriptor state. A read memory barrier is 874 * sufficient for this. This pairs with desc_push_tail:B. 875 * 876 * Memory barrier involvement: 877 * 878 * If desc_reserve:C reads from desc_push_tail:B, then 879 * desc_reserve:E reads from desc_make_reusable:A. 880 * 881 * Relies on: 882 * 883 * MB from desc_make_reusable:A to desc_push_tail:B 884 * matching 885 * RMB from desc_reserve:C to desc_reserve:E 886 * 887 * Note: desc_make_reusable:A and desc_push_tail:B can be 888 * different CPUs. However, the desc_push_tail:B CPU 889 * (which performs the full memory barrier) must have 890 * previously seen desc_make_reusable:A. 891 * 892 * 2. Guarantee the tail ID is stored before storing the head 893 * ID. This pairs with desc_reserve:B. 894 * 895 * 3. Guarantee any data ring tail changes are stored before 896 * recycling the descriptor. Data ring tail changes can 897 * happen via desc_push_tail()->data_push_tail(). A full 898 * memory barrier is needed since another CPU may have 899 * pushed the data ring tails. This pairs with 900 * data_push_tail:B. 901 * 902 * 4. Guarantee a new tail ID is stored before recycling the 903 * descriptor. A full memory barrier is needed since 904 * another CPU may have pushed the tail ID. This pairs 905 * with desc_push_tail:C and this also pairs with 906 * prb_first_seq:C. 907 */ 908 } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id, 909 id)); /* LMM(desc_reserve:D) */ 910 911 desc = to_desc(desc_ring, id); 912 913 /* 914 * If the descriptor has been recycled, verify the old state val. 915 * See "ABA Issues" about why this verification is performed. 
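	 * The only legal values here are 0 (the descriptor has never been
	 * used) or the previous-wrap ID combined with the committed and
	 * reusable flags.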
916 */ 917 prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */ 918 if (prev_state_val && 919 prev_state_val != (id_prev_wrap | DESC_COMMITTED_MASK | DESC_REUSE_MASK)) { 920 WARN_ON_ONCE(1); 921 return false; 922 } 923 924 /* 925 * Assign the descriptor a new ID and set its state to reserved. 926 * See "ABA Issues" about why cmpxchg() instead of set() is used. 927 * 928 * Guarantee the new descriptor ID and state is stored before making 929 * any other changes. A write memory barrier is sufficient for this. 930 * This pairs with desc_read:D. 931 */ 932 if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val, 933 id | 0)) { /* LMM(desc_reserve:F) */ 934 WARN_ON_ONCE(1); 935 return false; 936 } 937 938 /* Now data in @desc can be modified: LMM(desc_reserve:G) */ 939 940 *id_out = id; 941 return true; 942 } 943 944 /* Determine the end of a data block. */ 945 static unsigned long get_next_lpos(struct prb_data_ring *data_ring, 946 unsigned long lpos, unsigned int size) 947 { 948 unsigned long begin_lpos; 949 unsigned long next_lpos; 950 951 begin_lpos = lpos; 952 next_lpos = lpos + size; 953 954 /* First check if the data block does not wrap. */ 955 if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos)) 956 return next_lpos; 957 958 /* Wrapping data blocks store their data at the beginning. */ 959 return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size); 960 } 961 962 /* 963 * Allocate a new data block, invalidating the oldest data block(s) 964 * if necessary. This function also associates the data block with 965 * a specified descriptor. 966 */ 967 static char *data_alloc(struct printk_ringbuffer *rb, 968 struct prb_data_ring *data_ring, unsigned int size, 969 struct prb_data_blk_lpos *blk_lpos, unsigned long id) 970 { 971 struct prb_data_block *blk; 972 unsigned long begin_lpos; 973 unsigned long next_lpos; 974 975 if (size == 0) { 976 /* Specify a data-less block. */ 977 blk_lpos->begin = NO_LPOS; 978 blk_lpos->next = NO_LPOS; 979 return NULL; 980 } 981 982 size = to_blk_size(size); 983 984 begin_lpos = atomic_long_read(&data_ring->head_lpos); 985 986 do { 987 next_lpos = get_next_lpos(data_ring, begin_lpos, size); 988 989 if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) { 990 /* Failed to allocate, specify a data-less block. */ 991 blk_lpos->begin = FAILED_LPOS; 992 blk_lpos->next = FAILED_LPOS; 993 return NULL; 994 } 995 996 /* 997 * 1. Guarantee any descriptor states that have transitioned 998 * to reusable are stored before modifying the newly 999 * allocated data area. A full memory barrier is needed 1000 * since other CPUs may have made the descriptor states 1001 * reusable. See data_push_tail:A about why the reusable 1002 * states are visible. This pairs with desc_read:D. 1003 * 1004 * 2. Guarantee any updated tail lpos is stored before 1005 * modifying the newly allocated data area. Another CPU may 1006 * be in data_make_reusable() and is reading a block ID 1007 * from this area. data_make_reusable() can handle reading 1008 * a garbage block ID value, but then it must be able to 1009 * load a new tail lpos. A full memory barrier is needed 1010 * since other CPUs may have updated the tail lpos. This 1011 * pairs with data_push_tail:B. 
1012 */ 1013 } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos, 1014 next_lpos)); /* LMM(data_alloc:A) */ 1015 1016 blk = to_block(data_ring, begin_lpos); 1017 blk->id = id; /* LMM(data_alloc:B) */ 1018 1019 if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) { 1020 /* Wrapping data blocks store their data at the beginning. */ 1021 blk = to_block(data_ring, 0); 1022 1023 /* 1024 * Store the ID on the wrapped block for consistency. 1025 * The printk_ringbuffer does not actually use it. 1026 */ 1027 blk->id = id; 1028 } 1029 1030 blk_lpos->begin = begin_lpos; 1031 blk_lpos->next = next_lpos; 1032 1033 return &blk->data[0]; 1034 } 1035 1036 /* Return the number of bytes used by a data block. */ 1037 static unsigned int space_used(struct prb_data_ring *data_ring, 1038 struct prb_data_blk_lpos *blk_lpos) 1039 { 1040 /* Data-less blocks take no space. */ 1041 if (LPOS_DATALESS(blk_lpos->begin)) 1042 return 0; 1043 1044 if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { 1045 /* Data block does not wrap. */ 1046 return (DATA_INDEX(data_ring, blk_lpos->next) - 1047 DATA_INDEX(data_ring, blk_lpos->begin)); 1048 } 1049 1050 /* 1051 * For wrapping data blocks, the trailing (wasted) space is 1052 * also counted. 1053 */ 1054 return (DATA_INDEX(data_ring, blk_lpos->next) + 1055 DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin)); 1056 } 1057 1058 /* 1059 * Given @blk_lpos, return a pointer to the writer data from the data block 1060 * and calculate the size of the data part. A NULL pointer is returned if 1061 * @blk_lpos specifies values that could never be legal. 1062 * 1063 * This function (used by readers) performs strict validation on the lpos 1064 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 1065 * triggered if an internal error is detected. 1066 */ 1067 static const char *get_data(struct prb_data_ring *data_ring, 1068 struct prb_data_blk_lpos *blk_lpos, 1069 unsigned int *data_size) 1070 { 1071 struct prb_data_block *db; 1072 1073 /* Data-less data block description. */ 1074 if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) { 1075 if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) { 1076 *data_size = 0; 1077 return ""; 1078 } 1079 return NULL; 1080 } 1081 1082 /* Regular data block: @begin less than @next and in same wrap. */ 1083 if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && 1084 blk_lpos->begin < blk_lpos->next) { 1085 db = to_block(data_ring, blk_lpos->begin); 1086 *data_size = blk_lpos->next - blk_lpos->begin; 1087 1088 /* Wrapping data block: @begin is one wrap behind @next. */ 1089 } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == 1090 DATA_WRAPS(data_ring, blk_lpos->next)) { 1091 db = to_block(data_ring, 0); 1092 *data_size = DATA_INDEX(data_ring, blk_lpos->next); 1093 1094 /* Illegal block description. */ 1095 } else { 1096 WARN_ON_ONCE(1); 1097 return NULL; 1098 } 1099 1100 /* A valid data block will always be aligned to the ID size. */ 1101 if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) || 1102 WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) { 1103 return NULL; 1104 } 1105 1106 /* A valid data block will always have at least an ID. */ 1107 if (WARN_ON_ONCE(*data_size < sizeof(db->id))) 1108 return NULL; 1109 1110 /* Subtract block ID space from size to reflect data size. 
*/ 1111 *data_size -= sizeof(db->id); 1112 1113 return &db->data[0]; 1114 } 1115 1116 /** 1117 * prb_reserve() - Reserve space in the ringbuffer. 1118 * 1119 * @e: The entry structure to setup. 1120 * @rb: The ringbuffer to reserve data in. 1121 * @r: The record structure to allocate buffers for. 1122 * 1123 * This is the public function available to writers to reserve data. 1124 * 1125 * The writer specifies the text and dict sizes to reserve by setting the 1126 * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries 1127 * are optional, so @dict_buf_size is allowed to be 0. To ensure proper 1128 * initialization of @r, prb_rec_init_wr() should be used. 1129 * 1130 * Context: Any context. Disables local interrupts on success. 1131 * Return: true if at least text data could be allocated, otherwise false. 1132 * 1133 * On success, the fields @info, @text_buf, @dict_buf of @r will be set by 1134 * this function and should be filled in by the writer before committing. Also 1135 * on success, prb_record_text_space() can be used on @e to query the actual 1136 * space used for the text data block. 1137 * 1138 * If the function fails to reserve dictionary space (but all else succeeded), 1139 * it will still report success. In that case @dict_buf is set to NULL and 1140 * @dict_buf_size is set to 0. Writers must check this before writing to 1141 * dictionary space. 1142 * 1143 * @info->text_len and @info->dict_len will already be set to @text_buf_size 1144 * and @dict_buf_size, respectively. If dictionary space reservation fails, 1145 * @info->dict_len is set to 0. 1146 */ 1147 bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, 1148 struct printk_record *r) 1149 { 1150 struct prb_desc_ring *desc_ring = &rb->desc_ring; 1151 struct prb_desc *d; 1152 unsigned long id; 1153 1154 if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) 1155 goto fail; 1156 1157 if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size)) 1158 goto fail; 1159 1160 /* 1161 * Descriptors in the reserved state act as blockers to all further 1162 * reservations once the desc_ring has fully wrapped. Disable 1163 * interrupts during the reserve/commit window in order to minimize 1164 * the likelihood of this happening. 1165 */ 1166 local_irq_save(e->irqflags); 1167 1168 if (!desc_reserve(rb, &id)) { 1169 /* Descriptor reservation failures are tracked. */ 1170 atomic_long_inc(&rb->fail); 1171 local_irq_restore(e->irqflags); 1172 goto fail; 1173 } 1174 1175 d = to_desc(desc_ring, id); 1176 1177 /* 1178 * Set the @e fields here so that prb_commit() can be used if 1179 * text data allocation fails. 1180 */ 1181 e->rb = rb; 1182 e->id = id; 1183 1184 /* 1185 * Initialize the sequence number if it has "never been set". 1186 * Otherwise just increment it by a full wrap. 1187 * 1188 * @seq is considered "never been set" if it has a value of 0, 1189 * _except_ for @descs[0], which was specially setup by the ringbuffer 1190 * initializer and therefore is always considered as set. 1191 * 1192 * See the "Bootstrap" comment block in printk_ringbuffer.h for 1193 * details about how the initializer bootstraps the descriptors. 1194 */ 1195 if (d->info.seq == 0 && DESC_INDEX(desc_ring, id) != 0) 1196 d->info.seq = DESC_INDEX(desc_ring, id); 1197 else 1198 d->info.seq += DESCS_COUNT(desc_ring); 1199 1200 r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size, 1201 &d->text_blk_lpos, id); 1202 /* If text data allocation fails, a data-less record is committed. 
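	 * Committing (rather than leaving the descriptor reserved) keeps the
	 * failed record from blocking future reservations; readers will
	 * simply skip it.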
*/ 1203 if (r->text_buf_size && !r->text_buf) { 1204 d->info.text_len = 0; 1205 d->info.dict_len = 0; 1206 prb_commit(e); 1207 /* prb_commit() re-enabled interrupts. */ 1208 goto fail; 1209 } 1210 1211 r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size, 1212 &d->dict_blk_lpos, id); 1213 /* 1214 * If dict data allocation fails, the caller can still commit 1215 * text. But dictionary information will not be available. 1216 */ 1217 if (r->dict_buf_size && !r->dict_buf) 1218 r->dict_buf_size = 0; 1219 1220 r->info = &d->info; 1221 1222 /* Set default values for the sizes. */ 1223 d->info.text_len = r->text_buf_size; 1224 d->info.dict_len = r->dict_buf_size; 1225 1226 /* Record full text space used by record. */ 1227 e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); 1228 1229 return true; 1230 fail: 1231 /* Make it clear to the caller that the reserve failed. */ 1232 memset(r, 0, sizeof(*r)); 1233 return false; 1234 } 1235 1236 /** 1237 * prb_commit() - Commit (previously reserved) data to the ringbuffer. 1238 * 1239 * @e: The entry containing the reserved data information. 1240 * 1241 * This is the public function available to writers to commit data. 1242 * 1243 * Context: Any context. Enables local interrupts. 1244 */ 1245 void prb_commit(struct prb_reserved_entry *e) 1246 { 1247 struct prb_desc_ring *desc_ring = &e->rb->desc_ring; 1248 struct prb_desc *d = to_desc(desc_ring, e->id); 1249 unsigned long prev_state_val = e->id | 0; 1250 1251 /* Now the writer has finished all writing: LMM(prb_commit:A) */ 1252 1253 /* 1254 * Set the descriptor as committed. See "ABA Issues" about why 1255 * cmpxchg() instead of set() is used. 1256 * 1257 * Guarantee all record data is stored before the descriptor state 1258 * is stored as committed. A write memory barrier is sufficient for 1259 * this. This pairs with desc_read:B. 1260 */ 1261 if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, 1262 e->id | DESC_COMMITTED_MASK)) { /* LMM(prb_commit:B) */ 1263 WARN_ON_ONCE(1); 1264 } 1265 1266 /* Restore interrupts, the reserve/commit window is finished. */ 1267 local_irq_restore(e->irqflags); 1268 } 1269 1270 /* 1271 * Count the number of lines in provided text. All text has at least 1 line 1272 * (even if @text_size is 0). Each '\n' processed is counted as an additional 1273 * line. 1274 */ 1275 static unsigned int count_lines(const char *text, unsigned int text_size) 1276 { 1277 unsigned int next_size = text_size; 1278 unsigned int line_count = 1; 1279 const char *next = text; 1280 1281 while (next_size) { 1282 next = memchr(next, '\n', next_size); 1283 if (!next) 1284 break; 1285 line_count++; 1286 next++; 1287 next_size = text_size - (next - text); 1288 } 1289 1290 return line_count; 1291 } 1292 1293 /* 1294 * Given @blk_lpos, copy an expected @len of data into the provided buffer. 1295 * If @line_count is provided, count the number of lines in the data. 1296 * 1297 * This function (used by readers) performs strict validation on the data 1298 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 1299 * triggered if an internal error is detected. 1300 */ 1301 static bool copy_data(struct prb_data_ring *data_ring, 1302 struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf, 1303 unsigned int buf_size, unsigned int *line_count) 1304 { 1305 unsigned int data_size; 1306 const char *data; 1307 1308 /* Caller might not want any data. 
*/ 1309 if ((!buf || !buf_size) && !line_count) 1310 return true; 1311 1312 data = get_data(data_ring, blk_lpos, &data_size); 1313 if (!data) 1314 return false; 1315 1316 /* 1317 * Actual cannot be less than expected. It can be more than expected 1318 * because of the trailing alignment padding. 1319 */ 1320 if (WARN_ON_ONCE(data_size < (unsigned int)len)) { 1321 pr_warn_once("wrong data size (%u, expecting %hu) for data: %.*s\n", 1322 data_size, len, data_size, data); 1323 return false; 1324 } 1325 1326 /* Caller interested in the line count? */ 1327 if (line_count) 1328 *line_count = count_lines(data, data_size); 1329 1330 /* Caller interested in the data content? */ 1331 if (!buf || !buf_size) 1332 return true; 1333 1334 data_size = min_t(u16, buf_size, len); 1335 1336 memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */ 1337 return true; 1338 } 1339 1340 /* 1341 * This is an extended version of desc_read(). It gets a copy of a specified 1342 * descriptor. However, it also verifies that the record is committed and has 1343 * the sequence number @seq. On success, 0 is returned. 1344 * 1345 * Error return values: 1346 * -EINVAL: A committed record with sequence number @seq does not exist. 1347 * -ENOENT: A committed record with sequence number @seq exists, but its data 1348 * is not available. This is a valid record, so readers should 1349 * continue with the next record. 1350 */ 1351 static int desc_read_committed_seq(struct prb_desc_ring *desc_ring, 1352 unsigned long id, u64 seq, 1353 struct prb_desc *desc_out) 1354 { 1355 struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos; 1356 enum desc_state d_state; 1357 1358 d_state = desc_read(desc_ring, id, desc_out); 1359 1360 /* 1361 * An unexpected @id (desc_miss) or @seq mismatch means the record 1362 * does not exist. A descriptor in the reserved state means the 1363 * record does not yet exist for the reader. 1364 */ 1365 if (d_state == desc_miss || 1366 d_state == desc_reserved || 1367 desc_out->info.seq != seq) { 1368 return -EINVAL; 1369 } 1370 1371 /* 1372 * A descriptor in the reusable state may no longer have its data 1373 * available; report it as existing but with lost data. Or the record 1374 * may actually be a record with lost data. 1375 */ 1376 if (d_state == desc_reusable || 1377 (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) { 1378 return -ENOENT; 1379 } 1380 1381 return 0; 1382 } 1383 1384 /* 1385 * Copy the ringbuffer data from the record with @seq to the provided 1386 * @r buffer. On success, 0 is returned. 1387 * 1388 * See desc_read_committed_seq() for error return values. 1389 */ 1390 static int prb_read(struct printk_ringbuffer *rb, u64 seq, 1391 struct printk_record *r, unsigned int *line_count) 1392 { 1393 struct prb_desc_ring *desc_ring = &rb->desc_ring; 1394 struct prb_desc *rdesc = to_desc(desc_ring, seq); 1395 atomic_long_t *state_var = &rdesc->state_var; 1396 struct prb_desc desc; 1397 unsigned long id; 1398 int err; 1399 1400 /* Extract the ID, used to specify the descriptor to read. */ 1401 id = DESC_ID(atomic_long_read(state_var)); 1402 1403 /* Get a local copy of the correct descriptor (if available). */ 1404 err = desc_read_committed_seq(desc_ring, id, seq, &desc); 1405 1406 /* 1407 * If @r is NULL, the caller is only interested in the availability 1408 * of the record. 1409 */ 1410 if (err || !r) 1411 return err; 1412 1413 /* If requested, copy meta data. */ 1414 if (r->info) 1415 memcpy(r->info, &desc.info, sizeof(*(r->info))); 1416 1417 /* Copy text data. 
If it fails, this is a data-less record. */ 1418 if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, desc.info.text_len, 1419 r->text_buf, r->text_buf_size, line_count)) { 1420 return -ENOENT; 1421 } 1422 1423 /* 1424 * Copy dict data. Although this should not fail, dict data is not 1425 * important. So if it fails, modify the copied meta data to report 1426 * that there is no dict data, thus silently dropping the dict data. 1427 */ 1428 if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, desc.info.dict_len, 1429 r->dict_buf, r->dict_buf_size, NULL)) { 1430 if (r->info) 1431 r->info->dict_len = 0; 1432 } 1433 1434 /* Ensure the record is still committed and has the same @seq. */ 1435 return desc_read_committed_seq(desc_ring, id, seq, &desc); 1436 } 1437 1438 /* Get the sequence number of the tail descriptor. */ 1439 static u64 prb_first_seq(struct printk_ringbuffer *rb) 1440 { 1441 struct prb_desc_ring *desc_ring = &rb->desc_ring; 1442 enum desc_state d_state; 1443 struct prb_desc desc; 1444 unsigned long id; 1445 1446 for (;;) { 1447 id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */ 1448 1449 d_state = desc_read(desc_ring, id, &desc); /* LMM(prb_first_seq:B) */ 1450 1451 /* 1452 * This loop will not be infinite because the tail is 1453 * _always_ in the committed or reusable state. 1454 */ 1455 if (d_state == desc_committed || d_state == desc_reusable) 1456 break; 1457 1458 /* 1459 * Guarantee the last state load from desc_read() is before 1460 * reloading @tail_id in order to see a new tail in the case 1461 * that the descriptor has been recycled. This pairs with 1462 * desc_reserve:D. 1463 * 1464 * Memory barrier involvement: 1465 * 1466 * If prb_first_seq:B reads from desc_reserve:F, then 1467 * prb_first_seq:A reads from desc_push_tail:B. 1468 * 1469 * Relies on: 1470 * 1471 * MB from desc_push_tail:B to desc_reserve:F 1472 * matching 1473 * RMB prb_first_seq:B to prb_first_seq:A 1474 */ 1475 smp_rmb(); /* LMM(prb_first_seq:C) */ 1476 } 1477 1478 return desc.info.seq; 1479 } 1480 1481 /* 1482 * Non-blocking read of a record. Updates @seq to the last committed record 1483 * (which may have no data). 1484 * 1485 * See the description of prb_read_valid() and prb_read_valid_info() 1486 * for details. 1487 */ 1488 static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, 1489 struct printk_record *r, unsigned int *line_count) 1490 { 1491 u64 tail_seq; 1492 int err; 1493 1494 while ((err = prb_read(rb, *seq, r, line_count))) { 1495 tail_seq = prb_first_seq(rb); 1496 1497 if (*seq < tail_seq) { 1498 /* 1499 * Behind the tail. Catch up and try again. This 1500 * can happen for -ENOENT and -EINVAL cases. 1501 */ 1502 *seq = tail_seq; 1503 1504 } else if (err == -ENOENT) { 1505 /* Record exists, but no data available. Skip. */ 1506 (*seq)++; 1507 1508 } else { 1509 /* Non-existent/non-committed record. Must stop. */ 1510 return false; 1511 } 1512 } 1513 1514 return true; 1515 } 1516 1517 /** 1518 * prb_read_valid() - Non-blocking read of a requested record or (if gone) 1519 * the next available record. 1520 * 1521 * @rb: The ringbuffer to read from. 1522 * @seq: The sequence number of the record to read. 1523 * @r: A record data buffer to store the read record to. 1524 * 1525 * This is the public function available to readers to read a record. 1526 * 1527 * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be 1528 * filled in. Any of the buffer pointers can be set to NULL if the reader 1529 * is not interested in that data. 
To ensure proper initialization of @r, 1530 * prb_rec_init_rd() should be used. 1531 * 1532 * Context: Any context. 1533 * Return: true if a record was read, otherwise false. 1534 * 1535 * On success, the reader must check r->info.seq to see which record was 1536 * actually read. This allows the reader to detect dropped records. 1537 * 1538 * Failure means @seq refers to a not yet written record. 1539 */ 1540 bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, 1541 struct printk_record *r) 1542 { 1543 return _prb_read_valid(rb, &seq, r, NULL); 1544 } 1545 1546 /** 1547 * prb_read_valid_info() - Non-blocking read of meta data for a requested 1548 * record or (if gone) the next available record. 1549 * 1550 * @rb: The ringbuffer to read from. 1551 * @seq: The sequence number of the record to read. 1552 * @info: A buffer to store the read record meta data to. 1553 * @line_count: A buffer to store the number of lines in the record text. 1554 * 1555 * This is the public function available to readers to read only the 1556 * meta data of a record. 1557 * 1558 * The reader provides the @info, @line_count buffers to be filled in. 1559 * Either of the buffer pointers can be set to NULL if the reader is not 1560 * interested in that data. 1561 * 1562 * Context: Any context. 1563 * Return: true if a record's meta data was read, otherwise false. 1564 * 1565 * On success, the reader must check info->seq to see which record meta data 1566 * was actually read. This allows the reader to detect dropped records. 1567 * 1568 * Failure means @seq refers to a not yet written record. 1569 */ 1570 bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq, 1571 struct printk_info *info, unsigned int *line_count) 1572 { 1573 struct printk_record r; 1574 1575 prb_rec_init_rd(&r, info, NULL, 0, NULL, 0); 1576 1577 return _prb_read_valid(rb, &seq, &r, line_count); 1578 } 1579 1580 /** 1581 * prb_first_valid_seq() - Get the sequence number of the oldest available 1582 * record. 1583 * 1584 * @rb: The ringbuffer to get the sequence number from. 1585 * 1586 * This is the public function available to readers to see what the 1587 * first/oldest valid sequence number is. 1588 * 1589 * This provides readers a starting point to begin iterating the ringbuffer. 1590 * 1591 * Context: Any context. 1592 * Return: The sequence number of the first/oldest record or, if the 1593 * ringbuffer is empty, 0 is returned. 1594 */ 1595 u64 prb_first_valid_seq(struct printk_ringbuffer *rb) 1596 { 1597 u64 seq = 0; 1598 1599 if (!_prb_read_valid(rb, &seq, NULL, NULL)) 1600 return 0; 1601 1602 return seq; 1603 } 1604 1605 /** 1606 * prb_next_seq() - Get the sequence number after the last available record. 1607 * 1608 * @rb: The ringbuffer to get the sequence number from. 1609 * 1610 * This is the public function available to readers to see what the next 1611 * newest sequence number available to readers will be. 1612 * 1613 * This provides readers a sequence number to jump to if all currently 1614 * available records should be skipped. 1615 * 1616 * Context: Any context. 1617 * Return: The sequence number of the next newest (not yet available) record 1618 * for readers. 1619 */ 1620 u64 prb_next_seq(struct printk_ringbuffer *rb) 1621 { 1622 u64 seq = 0; 1623 1624 /* Search forward from the oldest descriptor. */ 1625 while (_prb_read_valid(rb, &seq, NULL, NULL)) 1626 seq++; 1627 1628 return seq; 1629 } 1630 1631 /** 1632 * prb_init() - Initialize a ringbuffer to use provided external buffers. 
1633 * 1634 * @rb: The ringbuffer to initialize. 1635 * @text_buf: The data buffer for text data. 1636 * @textbits: The size of @text_buf as a power-of-2 value. 1637 * @dict_buf: The data buffer for dictionary data. 1638 * @dictbits: The size of @dict_buf as a power-of-2 value. 1639 * @descs: The descriptor buffer for ringbuffer records. 1640 * @descbits: The count of @descs items as a power-of-2 value. 1641 * 1642 * This is the public function available to writers to setup a ringbuffer 1643 * during runtime using provided buffers. 1644 * 1645 * This must match the initialization of DEFINE_PRINTKRB(). 1646 * 1647 * Context: Any context. 1648 */ 1649 void prb_init(struct printk_ringbuffer *rb, 1650 char *text_buf, unsigned int textbits, 1651 char *dict_buf, unsigned int dictbits, 1652 struct prb_desc *descs, unsigned int descbits) 1653 { 1654 memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0])); 1655 1656 rb->desc_ring.count_bits = descbits; 1657 rb->desc_ring.descs = descs; 1658 atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); 1659 atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); 1660 1661 rb->text_data_ring.size_bits = textbits; 1662 rb->text_data_ring.data = text_buf; 1663 atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits)); 1664 atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits)); 1665 1666 rb->dict_data_ring.size_bits = dictbits; 1667 rb->dict_data_ring.data = dict_buf; 1668 atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits)); 1669 atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits)); 1670 1671 atomic_long_set(&rb->fail, 0); 1672 1673 descs[0].info.seq = -(u64)_DESCS_COUNT(descbits); 1674 1675 descs[_DESCS_COUNT(descbits) - 1].info.seq = 0; 1676 atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits)); 1677 descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS; 1678 descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS; 1679 descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS; 1680 descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS; 1681 } 1682 1683 /** 1684 * prb_record_text_space() - Query the full actual used ringbuffer space for 1685 * the text data of a reserved entry. 1686 * 1687 * @e: The successfully reserved entry to query. 1688 * 1689 * This is the public function available to writers to see how much actual 1690 * space is used in the ringbuffer to store the text data of the specified 1691 * entry. 1692 * 1693 * This function is only valid if @e has been successfully reserved using 1694 * prb_reserve(). 1695 * 1696 * Context: Any context. 1697 * Return: The size in bytes used by the text data of the associated record. 1698 */ 1699 unsigned int prb_record_text_space(struct prb_reserved_entry *e) 1700 { 1701 return e->text_space; 1702 } 1703
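
/*
 * Example of runtime initialization (a minimal sketch; the names and sizes
 * below are hypothetical and not what printk itself uses)::
 *
 *	static char ex_text[1 << 12] __aligned(__alignof__(unsigned long));
 *	static char ex_dict[1 << 10] __aligned(__alignof__(unsigned long));
 *	static struct prb_desc ex_descs[1 << 6];
 *	static struct printk_ringbuffer ex_rb;
 *
 *	prb_init(&ex_rb, &ex_text[0], 12, &ex_dict[0], 10, &ex_descs[0], 6);
 *
 * This provides the same setup as DEFINE_PRINTKRB(), but with the buffers
 * supplied by the caller at runtime.
 */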