// SPDX-License-Identifier: GPL-2.0

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "printk_ringbuffer.h"

/**
 * DOC: printk_ringbuffer overview
 *
 * Data Structure
 * --------------
 * The printk_ringbuffer is made up of 3 internal ringbuffers:
 *
 *   desc_ring
 *     A ring of descriptors and their meta data (such as sequence number,
 *     timestamp, loglevel, etc.) as well as internal state information about
 *     the record and logical positions specifying where in the other
 *     ringbuffers the text and dictionary strings are located.
 *
 *   text_data_ring
 *     A ring of data blocks. A data block consists of an unsigned long
 *     integer (ID) that maps to a desc_ring index followed by the text
 *     string of the record.
 *
 *   dict_data_ring
 *     A ring of data blocks. A data block consists of an unsigned long
 *     integer (ID) that maps to a desc_ring index followed by the dictionary
 *     string of the record.
 *
 * The internal state information of a descriptor is the key element to allow
 * readers and writers to locklessly synchronize access to the data.
 *
 * Implementation
 * --------------
 *
 * Descriptor Ring
 * ~~~~~~~~~~~~~~~
 * The descriptor ring is an array of descriptors. A descriptor contains
 * essential meta data to track the data of a printk record using
 * blk_lpos structs pointing to associated text and dictionary data blocks
 * (see "Data Rings" below). Each descriptor is assigned an ID that maps
 * directly to index values of the descriptor array and has a state. The ID
 * and the state are bitwise combined into a single descriptor field named
 * @state_var, allowing ID and state to be synchronously and atomically
 * updated.
 *
 * Descriptors have four states:
 *
 *   reserved
 *     A writer is modifying the record.
 *
 *   committed
 *     The record and all its data are written. A writer can reopen the
 *     descriptor (transitioning it back to reserved), but in the committed
 *     state the data is consistent.
 *
 *   finalized
 *     The record and all its data are complete and available for reading. A
 *     writer cannot reopen the descriptor.
 *
 *   reusable
 *     The record exists, but its text and/or dictionary data may no longer
 *     be available.
 *
 * Querying the @state_var of a record requires providing the ID of the
 * descriptor to query. This can yield a possible fifth (pseudo) state:
 *
 *   miss
 *     The descriptor being queried has an unexpected ID.
 *
 * The descriptor ring has a @tail_id that contains the ID of the oldest
 * descriptor and @head_id that contains the ID of the newest descriptor.
 *
 * When a new descriptor should be created (and the ring is full), the tail
 * descriptor is invalidated by first transitioning to the reusable state and
 * then invalidating all tail data blocks up to and including the data blocks
 * associated with the tail descriptor (for text and dictionary rings). Then
 * @tail_id is advanced, followed by advancing @head_id. And finally the
 * @state_var of the new descriptor is initialized to the new ID and reserved
 * state.
 *
 * The @tail_id can only be advanced if the new @tail_id would be in the
 * committed or reusable queried state. This makes it possible that a valid
 * sequence number of the tail is always available.
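 *
 * For illustration, querying the state of a descriptor could look as
 * follows (a simplified sketch of the internal get_desc_state() helper
 * below, using the DESC_ID() and DESC_STATE() macros from
 * printk_ringbuffer.h)::
 *
 *	unsigned long state_val = atomic_long_read(&desc->state_var);
 *
 *	if (id != DESC_ID(state_val))
 *		return desc_miss;	// unexpected ID: pseudo state
 *
 *	return DESC_STATE(state_val);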
 *
 * Descriptor Finalization
 * ~~~~~~~~~~~~~~~~~~~~~~~
 * When a writer calls the commit function prb_commit(), record data is
 * fully stored and is consistent within the ringbuffer. However, a writer can
 * reopen that record, claiming exclusive access (as with prb_reserve()), and
 * modify that record. When finished, the writer must again commit the record.
 *
 * In order for a record to be made available to readers (and also become
 * recyclable for writers), it must be finalized. A finalized record cannot be
 * reopened and can never become "unfinalized". Record finalization can occur
 * in three different scenarios:
 *
 *   1) A writer can simultaneously commit and finalize its record by calling
 *      prb_final_commit() instead of prb_commit().
 *
 *   2) When a new record is reserved and the previous record has been
 *      committed via prb_commit(), that previous record is automatically
 *      finalized.
 *
 *   3) When a record is committed via prb_commit() and a newer record
 *      already exists, the record being committed is automatically finalized.
 *
 * Data Rings
 * ~~~~~~~~~~
 * The two data rings (text and dictionary) function identically. They exist
 * separately so that their buffer sizes can be individually set and they do
 * not affect one another.
 *
 * Data rings are byte arrays composed of data blocks. Data blocks are
 * referenced by blk_lpos structs that point to the logical position of the
 * beginning of a data block and the beginning of the next adjacent data
 * block. Logical positions are mapped directly to index values of the byte
 * array ringbuffer.
 *
 * Each data block consists of an ID followed by the writer data. The ID is
 * the identifier of a descriptor that is associated with the data block. A
 * given data block is considered valid if all of the following conditions
 * are met:
 *
 *   1) The descriptor associated with the data block is in the committed
 *      or finalized queried state.
 *
 *   2) The blk_lpos struct within the descriptor associated with the data
 *      block references back to the same data block.
 *
 *   3) The data block is within the head/tail logical position range.
 *
 * If the writer data of a data block would extend beyond the end of the
 * byte array, only the ID of the data block is stored at the logical
 * position and the full data block (ID and writer data) is stored at the
 * beginning of the byte array. The referencing blk_lpos will point to the
 * ID before the wrap and the next data block will be at the logical
 * position adjacent the full data block after the wrap.
 *
 * Data rings have a @tail_lpos that points to the beginning of the oldest
 * data block and a @head_lpos that points to the logical position of the
 * next (not yet existing) data block.
 *
 * When a new data block should be created (and the ring is full), tail data
 * blocks will first be invalidated by putting their associated descriptors
 * into the reusable state and then pushing the @tail_lpos forward beyond
 * them. Then the @head_lpos is pushed forward and is associated with a new
 * descriptor. If a data block is not valid, the @tail_lpos cannot be
 * advanced beyond it.
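 *
 * For example, the logical position just past a (possibly wrapping) data
 * block can be sketched as follows (a simplified form of the internal
 * get_next_lpos() helper below)::
 *
 *	if (DATA_WRAPS(data_ring, lpos) == DATA_WRAPS(data_ring, lpos + size))
 *		next_lpos = lpos + size;
 *	else
 *		// wrapping: the full data block sits at the new wrap start
 *		next_lpos = DATA_THIS_WRAP_START_LPOS(data_ring, lpos + size) + size;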
 *
 * Info Array
 * ~~~~~~~~~~
 * The general meta data of printk records is stored in printk_info structs,
 * placed in an array with the same number of elements as the descriptor
 * ring. Each info corresponds to the descriptor of the same index in the
 * descriptor ring. Info validity is confirmed by evaluating the corresponding
 * descriptor before and after loading the info.
 *
 * Usage
 * -----
 * Here are some simple examples demonstrating writers and readers. For the
 * examples a global ringbuffer (test_rb) is available (which is not the
 * actual ringbuffer used by printk)::
 *
 *	DEFINE_PRINTKRB(test_rb, 15, 5, 3);
 *
 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
 * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for
 * dictionary data.
 *
 * Sample writer code::
 *
 *	const char *dictstr = "dictionary text";
 *	const char *textstr = "message text";
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// specify how much to allocate
 *	prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1);
 *
 *	if (prb_reserve(&e, &test_rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 *		r.info->text_len = strlen(textstr);
 *
 *		// dictionary allocation may have failed
 *		if (r.dict_buf) {
 *			snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr);
 *			r.info->dict_len = strlen(dictstr);
 *		}
 *
 *		r.info->ts_nsec = local_clock();
 *
 *		prb_final_commit(&e);
 *	}
 *
 * Note that additional writer functions are available to extend a record
 * after it has been committed but not yet finalized. This can be done as
 * long as no new records have been reserved and the caller is the same.
 *
 * Sample writer code (record extending)::
 *
 *		// alternate rest of previous example
 *
 *		r.info->ts_nsec = local_clock();
 *		r.info->text_len = strlen(textstr);
 *		r.info->caller_id = printk_caller_id();
 *
 *		// commit the record (but do not finalize yet)
 *		prb_commit(&e);
 *	}
 *
 *	...
 *
 *	// specify additional 5 bytes text space to extend
 *	prb_rec_init_wr(&r, 5, 0);
 *
 *	if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id())) {
 *		snprintf(&r.text_buf[r.info->text_len],
 *			 r.text_buf_size - r.info->text_len, "hello");
 *
 *		r.info->text_len += 5;
 *
 *		prb_final_commit(&e);
 *	}
 *
 * Sample reader code::
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *	char dict_buf[32];
 *	u64 seq;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf),
 *			&dict_buf[0], sizeof(dict_buf));
 *
 *	prb_for_each_record(0, &test_rb, &seq, &r) {
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *
 *		if (info.text_len > r.text_buf_size) {
 *			pr_warn("record %llu text truncated\n", info.seq);
 *			text_buf[r.text_buf_size - 1] = 0;
 *		}
 *
 *		if (info.dict_len > r.dict_buf_size) {
 *			pr_warn("record %llu dict truncated\n", info.seq);
 *			dict_buf[r.dict_buf_size - 1] = 0;
 *		}
 *
 *		pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec,
 *			&text_buf[0], info.dict_len ? &dict_buf[0] : "");
 *	}
 *
 * Note that additional, less convenient, reader functions are available to
 * allow complex record access.
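 *
 * Sample reader code (single record, assuming the prb_read_valid()
 * interface from printk_ringbuffer.h)::
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), NULL, 0);
 *
 *	// read the record with sequence number 42, if it is available
 *	if (prb_read_valid(&test_rb, 42, &r))
 *		pr_info("%llu: %s\n", info.seq, &text_buf[0]);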
 *
 * ABA Issues
 * ~~~~~~~~~~
 * To help avoid ABA issues, descriptors are referenced by IDs (array index
 * values combined with tagged bits counting array wraps) and data blocks are
 * referenced by logical positions (array index values combined with tagged
 * bits counting array wraps). However, on 32-bit systems the number of
 * tagged bits is relatively small such that an ABA incident is (at least
 * theoretically) possible. For example, if 4 million maximally sized (1KiB)
 * printk messages were to occur in NMI context on a 32-bit system, the
 * interrupted context would not be able to recognize that the 32-bit integer
 * completely wrapped and thus represents a different data block than the one
 * the interrupted context expects.
 *
 * To help combat this possibility, additional state checking is performed
 * (such as using cmpxchg() even though set() would suffice). These extra
 * checks are commented as such and will hopefully catch any ABA issue that
 * a 32-bit system might experience.
 *
 * Memory Barriers
 * ~~~~~~~~~~~~~~~
 * Multiple memory barriers are used. To simplify proving correctness and
 * generating litmus tests, lines of code related to memory barriers
 * (loads, stores, and the associated memory barriers) are labeled::
 *
 *	LMM(function:letter)
 *
 * Comments reference the labels using only the "function:letter" part.
 *
 * The memory barrier pairs and their ordering are:
 *
 *   desc_reserve:D / desc_reserve:B
 *     push descriptor tail (id), then push descriptor head (id)
 *
 *   desc_reserve:D / data_push_tail:B
 *     push data tail (lpos), then set new descriptor reserved (state)
 *
 *   desc_reserve:D / desc_push_tail:C
 *     push descriptor tail (id), then set new descriptor reserved (state)
 *
 *   desc_reserve:D / prb_first_seq:C
 *     push descriptor tail (id), then set new descriptor reserved (state)
 *
 *   desc_reserve:F / desc_read:D
 *     set new descriptor id and reserved (state), then allow writer changes
 *
 *   data_alloc:A (or data_realloc:A) / desc_read:D
 *     set old descriptor reusable (state), then modify new data block area
 *
 *   data_alloc:A (or data_realloc:A) / data_push_tail:B
 *     push data tail (lpos), then modify new data block area
 *
 *   _prb_commit:B / desc_read:B
 *     store writer changes, then set new descriptor committed (state)
 *
 *   desc_reopen_last:A / _prb_commit:B
 *     set descriptor reserved (state), then read descriptor data
 *
 *   _prb_commit:B / desc_reserve:D
 *     set new descriptor committed (state), then check descriptor head (id)
 *
 *   data_push_tail:D / data_push_tail:A
 *     set descriptor reusable (state), then push data tail (lpos)
 *
 *   desc_push_tail:B / desc_reserve:D
 *     set descriptor reusable (state), then push descriptor tail (id)
 */

#define DATA_SIZE(data_ring)		_DATA_SIZE((data_ring)->size_bits)
#define DATA_SIZE_MASK(data_ring)	(DATA_SIZE(data_ring) - 1)

#define DESCS_COUNT(desc_ring)		_DESCS_COUNT((desc_ring)->count_bits)
#define DESCS_COUNT_MASK(desc_ring)	(DESCS_COUNT(desc_ring) - 1)

/* Determine the data array index from a logical position. */
#define DATA_INDEX(data_ring, lpos)	((lpos) & DATA_SIZE_MASK(data_ring))
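
/*
 * Worked example (illustrative values only): with size_bits = 5 the data
 * array holds 32 bytes, so the logical position 40 maps to array index
 * 8 (40 & 31) and has wrapped the array once (40 >> 5, see DATA_WRAPS()
 * below).
 */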

/* Determine the desc array index from an ID or sequence number. */
#define DESC_INDEX(desc_ring, n)	((n) & DESCS_COUNT_MASK(desc_ring))

/* Determine how many times the data array has wrapped. */
#define DATA_WRAPS(data_ring, lpos)	((lpos) >> (data_ring)->size_bits)

/* Determine if a logical position refers to a data-less block. */
#define LPOS_DATALESS(lpos)		((lpos) & 1UL)
#define BLK_DATALESS(blk)		(LPOS_DATALESS((blk)->begin) && \
					 LPOS_DATALESS((blk)->next))

/* Get the logical position at index 0 of the current wrap. */
#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
	((lpos) & ~DATA_SIZE_MASK(data_ring))

/* Get the ID for the same index of the previous wrap as the given ID. */
#define DESC_ID_PREV_WRAP(desc_ring, id) \
	DESC_ID((id) - DESCS_COUNT(desc_ring))

/*
 * A data block: mapped directly to the beginning of the data block area
 * specified as a logical position within the data ring.
 *
 * @id:   the ID of the associated descriptor
 * @data: the writer data
 *
 * Note that the size of a data block is only known by its associated
 * descriptor.
 */
struct prb_data_block {
	unsigned long	id;
	char		data[0];
};

/*
 * Return the descriptor associated with @n. @n can be either a
 * descriptor ID or a sequence number.
 */
static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
{
	return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
}

/*
 * Return the printk_info associated with @n. @n can be either a
 * descriptor ID or a sequence number.
 */
static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
{
	return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
}

static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
				       unsigned long begin_lpos)
{
	return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
}

/*
 * Increase the data size to account for data block meta data plus any
 * padding so that the adjacent data block is aligned on the ID size.
 */
static unsigned int to_blk_size(unsigned int size)
{
	struct prb_data_block *db = NULL;

	size += sizeof(*db);
	size = ALIGN(size, sizeof(db->id));
	return size;
}

/*
 * Sanity checker for reserve size. The ringbuffer code assumes that a data
 * block does not exceed the maximum possible size that could fit within the
 * ringbuffer. This function provides that basic size check so that the
 * assumption is safe.
 */
static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
{
	struct prb_data_block *db = NULL;

	if (size == 0)
		return true;

	/*
	 * Ensure the alignment padded size could possibly fit in the data
	 * array. The largest possible data block must still leave room for
	 * at least the ID of the next block.
	 */
	size = to_blk_size(size);
	if (size > DATA_SIZE(data_ring) - sizeof(db->id))
		return false;

	return true;
}

/* Query the state of a descriptor. */
static enum desc_state get_desc_state(unsigned long id,
				      unsigned long state_val)
{
	if (id != DESC_ID(state_val))
		return desc_miss;

	return DESC_STATE(state_val);
}

/*
 * Get a copy of a specified descriptor and return its queried state. If the
 * descriptor is in an inconsistent state (miss or reserved), the caller can
 * only expect the descriptor's @state_var field to be valid.
 *
 * The sequence number and caller_id can be optionally retrieved. Like all
 * non-state_var data, they are only valid if the descriptor is in a
 * consistent state.
 */
static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
				 unsigned long id, struct prb_desc *desc_out,
				 u64 *seq_out, u32 *caller_id_out)
{
	struct printk_info *info = to_info(desc_ring, id);
	struct prb_desc *desc = to_desc(desc_ring, id);
	atomic_long_t *state_var = &desc->state_var;
	enum desc_state d_state;
	unsigned long state_val;

	/* Check the descriptor state. */
	state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
	d_state = get_desc_state(id, state_val);
	if (d_state == desc_miss || d_state == desc_reserved) {
		/*
		 * The descriptor is in an inconsistent state. Set at least
		 * @state_var so that the caller can see the details of
		 * the inconsistent state.
		 */
		goto out;
	}

	/*
	 * Guarantee the state is loaded before copying the descriptor
	 * content. This avoids copying obsolete descriptor content that might
	 * not apply to the descriptor state. This pairs with _prb_commit:B.
	 *
	 * Memory barrier involvement:
	 *
	 * If desc_read:A reads from _prb_commit:B, then desc_read:C reads
	 * from _prb_commit:A.
	 *
	 * Relies on:
	 *
	 * WMB from _prb_commit:A to _prb_commit:B
	 *    matching
	 * RMB from desc_read:A to desc_read:C
	 */
	smp_rmb(); /* LMM(desc_read:B) */

	/*
	 * Copy the descriptor data. The data is not valid until the
	 * state has been re-checked. A memcpy() for all of @desc
	 * cannot be used because of the atomic_t @state_var field.
	 */
	memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
	       sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
	memcpy(&desc_out->dict_blk_lpos, &desc->dict_blk_lpos,
	       sizeof(desc_out->dict_blk_lpos)); /* also part of desc_read:C */
	if (seq_out)
		*seq_out = info->seq; /* also part of desc_read:C */
	if (caller_id_out)
		*caller_id_out = info->caller_id; /* also part of desc_read:C */

	/*
	 * 1. Guarantee the descriptor content is loaded before re-checking
	 *    the state. This avoids reading an obsolete descriptor state
	 *    that may not apply to the copied content. This pairs with
	 *    desc_reserve:F.
	 *
	 *    Memory barrier involvement:
	 *
	 *    If desc_read:C reads from desc_reserve:G, then desc_read:E
	 *    reads from desc_reserve:F.
	 *
	 *    Relies on:
	 *
	 *    WMB from desc_reserve:F to desc_reserve:G
	 *       matching
	 *    RMB from desc_read:C to desc_read:E
	 *
	 * 2. Guarantee the record data is loaded before re-checking the
	 *    state. This avoids reading an obsolete descriptor state that may
	 *    not apply to the copied data. This pairs with data_alloc:A and
	 *    data_realloc:A.
	 *
	 *    Memory barrier involvement:
	 *
	 *    If copy_data:A reads from data_alloc:B, then desc_read:E
	 *    reads from desc_make_reusable:A.
	 *
	 *    Relies on:
	 *
	 *    MB from desc_make_reusable:A to data_alloc:B
	 *       matching
	 *    RMB from desc_read:C to desc_read:E
	 *
	 *    Note: desc_make_reusable:A and data_alloc:B can be different
	 *          CPUs. However, the data_alloc:B CPU (which performs the
	 *          full memory barrier) must have previously seen
	 *          desc_make_reusable:A.
	 */
	smp_rmb(); /* LMM(desc_read:D) */

	/*
	 * The data has been copied. Return the current descriptor state,
	 * which may have changed since the load above.
	 */
	state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
	d_state = get_desc_state(id, state_val);
out:
	atomic_long_set(&desc_out->state_var, state_val);
	return d_state;
}

/*
 * Take a specified descriptor out of the finalized state by attempting
 * the transition from finalized to reusable. Either this context or some
 * other context will have been successful.
 */
static void desc_make_reusable(struct prb_desc_ring *desc_ring,
			       unsigned long id)
{
	unsigned long val_finalized = DESC_SV(id, desc_finalized);
	unsigned long val_reusable = DESC_SV(id, desc_reusable);
	struct prb_desc *desc = to_desc(desc_ring, id);
	atomic_long_t *state_var = &desc->state_var;

	atomic_long_cmpxchg_relaxed(state_var, val_finalized,
				    val_reusable); /* LMM(desc_make_reusable:A) */
}

/*
 * Given a data ring (text or dict), put the associated descriptor of each
 * data block from @lpos_begin until @lpos_end into the reusable state.
 *
 * If there is any problem making the associated descriptor reusable, either
 * the descriptor has not yet been finalized or another writer context has
 * already pushed the tail lpos past the problematic data block. Regardless,
 * on error the caller can re-load the tail lpos to determine the situation.
 */
static bool data_make_reusable(struct printk_ringbuffer *rb,
			       struct prb_data_ring *data_ring,
			       unsigned long lpos_begin,
			       unsigned long lpos_end,
			       unsigned long *lpos_out)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct prb_data_blk_lpos *blk_lpos;
	struct prb_data_block *blk;
	enum desc_state d_state;
	struct prb_desc desc;
	unsigned long id;

	/*
	 * Using the provided @data_ring, point @blk_lpos to the correct
	 * blk_lpos within the local copy of the descriptor.
	 */
	if (data_ring == &rb->text_data_ring)
		blk_lpos = &desc.text_blk_lpos;
	else
		blk_lpos = &desc.dict_blk_lpos;

	/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
	while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
		blk = to_block(data_ring, lpos_begin);

		/*
		 * Load the block ID from the data block. This is a data race
		 * against a writer that may have newly reserved this data
		 * area. If the loaded value matches a valid descriptor ID,
		 * the blk_lpos of that descriptor will be checked to make
		 * sure it points back to this data block. If the check fails,
		 * the data area has been recycled by another writer.
		 */
		id = blk->id; /* LMM(data_make_reusable:A) */

		d_state = desc_read(desc_ring, id, &desc,
				    NULL, NULL); /* LMM(data_make_reusable:B) */

		switch (d_state) {
		case desc_miss:
		case desc_reserved:
		case desc_committed:
			return false;
		case desc_finalized:
			/*
			 * This data block is invalid if the descriptor
			 * does not point back to it.
			 */
			if (blk_lpos->begin != lpos_begin)
				return false;
			desc_make_reusable(desc_ring, id);
			break;
		case desc_reusable:
			/*
			 * This data block is invalid if the descriptor
			 * does not point back to it.
			 */
			if (blk_lpos->begin != lpos_begin)
				return false;
			break;
		}

		/* Advance @lpos_begin to the next data block. */
		lpos_begin = blk_lpos->next;
	}

	*lpos_out = lpos_begin;
	return true;
}

/*
 * Advance the data ring tail to at least @lpos. This function puts
 * descriptors into the reusable state if the tail is pushed beyond
 * their associated data block.
 */
static bool data_push_tail(struct printk_ringbuffer *rb,
			   struct prb_data_ring *data_ring,
			   unsigned long lpos)
{
	unsigned long tail_lpos_new;
	unsigned long tail_lpos;
	unsigned long next_lpos;

	/* If @lpos is from a data-less block, there is nothing to do. */
	if (LPOS_DATALESS(lpos))
		return true;

	/*
	 * Any descriptor states that have transitioned to reusable due to the
	 * data tail being pushed to this loaded value will be visible to this
	 * CPU. This pairs with data_push_tail:D.
	 *
	 * Memory barrier involvement:
	 *
	 * If data_push_tail:A reads from data_push_tail:D, then this CPU can
	 * see desc_make_reusable:A.
	 *
	 * Relies on:
	 *
	 * MB from desc_make_reusable:A to data_push_tail:D
	 *    matches
	 * READFROM from data_push_tail:D to data_push_tail:A
	 *    thus
	 * READFROM from desc_make_reusable:A to this CPU
	 */
	tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */

	/*
	 * Loop until the tail lpos is at or beyond @lpos. This condition
	 * may already be satisfied, resulting in no full memory barrier
	 * from data_push_tail:D being performed. However, since this CPU
	 * sees the new tail lpos, any descriptor states that transitioned to
	 * the reusable state must already be visible.
	 */
	while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
		/*
		 * Make all descriptors reusable that are associated with
		 * data blocks before @lpos.
		 */
		if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
					&next_lpos)) {
			/*
			 * 1. Guarantee the block ID loaded in
			 *    data_make_reusable() is performed before
			 *    reloading the tail lpos. The failed
			 *    data_make_reusable() may be due to a newly
			 *    recycled data area causing the tail lpos to
			 *    have been previously pushed. This pairs with
			 *    data_alloc:A and data_realloc:A.
			 *
			 *    Memory barrier involvement:
			 *
			 *    If data_make_reusable:A reads from data_alloc:B,
			 *    then data_push_tail:C reads from
			 *    data_push_tail:D.
			 *
			 *    Relies on:
			 *
			 *    MB from data_push_tail:D to data_alloc:B
			 *       matching
			 *    RMB from data_make_reusable:A to
			 *    data_push_tail:C
			 *
			 *    Note: data_push_tail:D and data_alloc:B can be
			 *          different CPUs. However, the data_alloc:B
			 *          CPU (which performs the full memory
			 *          barrier) must have previously seen
			 *          data_push_tail:D.
			 *
			 * 2. Guarantee the descriptor state loaded in
			 *    data_make_reusable() is performed before
			 *    reloading the tail lpos. The failed
			 *    data_make_reusable() may be due to a newly
			 *    recycled descriptor causing the tail lpos to
			 *    have been previously pushed. This pairs with
			 *    desc_reserve:D.
			 *
			 *    Memory barrier involvement:
			 *
			 *    If data_make_reusable:B reads from
			 *    desc_reserve:F, then data_push_tail:C reads
			 *    from data_push_tail:D.
			 *
			 *    Relies on:
			 *
			 *    MB from data_push_tail:D to desc_reserve:F
			 *       matching
			 *    RMB from data_make_reusable:B to
			 *    data_push_tail:C
			 *
			 *    Note: data_push_tail:D and desc_reserve:F can
			 *          be different CPUs. However, the
			 *          desc_reserve:F CPU (which performs the
			 *          full memory barrier) must have previously
			 *          seen data_push_tail:D.
			 */
			smp_rmb(); /* LMM(data_push_tail:B) */

			tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
							); /* LMM(data_push_tail:C) */
			if (tail_lpos_new == tail_lpos)
				return false;

			/* Another CPU pushed the tail. Try again. */
			tail_lpos = tail_lpos_new;
			continue;
		}

		/*
		 * Guarantee any descriptor states that have transitioned to
		 * reusable are stored before pushing the tail lpos. A full
		 * memory barrier is needed since other CPUs may have made
		 * the descriptor states reusable. This pairs with
		 * data_push_tail:A.
		 */
		if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
					    next_lpos)) { /* LMM(data_push_tail:D) */
			break;
		}
	}

	return true;
}

/*
 * Advance the desc ring tail. This function advances the tail by one
 * descriptor, thus invalidating the oldest descriptor. Before advancing
 * the tail, the tail descriptor is made reusable and all data blocks up to
 * and including the descriptor's data block are invalidated (i.e. the data
 * ring tail is pushed past the data block of the descriptor being made
 * reusable).
 */
static bool desc_push_tail(struct printk_ringbuffer *rb,
			   unsigned long tail_id)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	enum desc_state d_state;
	struct prb_desc desc;

	d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);

	switch (d_state) {
	case desc_miss:
		/*
		 * If the ID is exactly 1 wrap behind the expected, it is
		 * in the process of being reserved by another writer and
		 * must be considered reserved.
		 */
		if (DESC_ID(atomic_long_read(&desc.state_var)) ==
		    DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
			return false;
		}

		/*
		 * The ID has changed. Another writer must have pushed the
		 * tail and recycled the descriptor already. Success is
		 * returned because the caller is only interested in the
		 * specified tail being pushed, which it was.
		 */
		return true;
	case desc_reserved:
	case desc_committed:
		return false;
	case desc_finalized:
		desc_make_reusable(desc_ring, tail_id);
		break;
	case desc_reusable:
		break;
	}

	/*
	 * Data blocks must be invalidated before their associated
	 * descriptor can be made available for recycling. Invalidating
	 * them later is not possible because there is no way to trust
	 * data blocks once their associated descriptor is gone.
	 */

	if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
		return false;
	if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next))
		return false;

	/*
	 * Check the next descriptor after @tail_id before pushing the tail
	 * to it because the tail must always be in a finalized or reusable
	 * state. The implementation of prb_first_seq() relies on this.
	 *
	 * A successful read implies that the next descriptor is less than or
	 * equal to @head_id so there is no risk of pushing the tail past the
	 * head.
	 */
	d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
			    NULL, NULL); /* LMM(desc_push_tail:A) */

	if (d_state == desc_finalized || d_state == desc_reusable) {
		/*
		 * Guarantee any descriptor states that have transitioned to
		 * reusable are stored before pushing the tail ID. This allows
		 * verifying the recycled descriptor state. A full memory
		 * barrier is needed since other CPUs may have made the
		 * descriptor states reusable. This pairs with desc_reserve:D.
		 */
		atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
				    DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
	} else {
		/*
		 * Guarantee the last state load from desc_read() is before
		 * reloading @tail_id in order to see a new tail ID in the
		 * case that the descriptor has been recycled. This pairs
		 * with desc_reserve:D.
		 *
		 * Memory barrier involvement:
		 *
		 * If desc_push_tail:A reads from desc_reserve:F, then
		 * desc_push_tail:D reads from desc_push_tail:B.
		 *
		 * Relies on:
		 *
		 * MB from desc_push_tail:B to desc_reserve:F
		 *    matching
		 * RMB from desc_push_tail:A to desc_push_tail:D
		 *
		 * Note: desc_push_tail:B and desc_reserve:F can be different
		 *       CPUs. However, the desc_reserve:F CPU (which performs
		 *       the full memory barrier) must have previously seen
		 *       desc_push_tail:B.
		 */
		smp_rmb(); /* LMM(desc_push_tail:C) */

		/*
		 * Re-check the tail ID. The descriptor following @tail_id is
		 * not in an allowed tail state. But if the tail has since
		 * been moved by another CPU, then it does not matter.
		 */
		if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
			return false;
	}

	return true;
}

/* Reserve a new descriptor, invalidating the oldest if necessary. */
static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	unsigned long prev_state_val;
	unsigned long id_prev_wrap;
	struct prb_desc *desc;
	unsigned long head_id;
	unsigned long id;

	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */

	do {
		desc = to_desc(desc_ring, head_id);

		id = DESC_ID(head_id + 1);
		id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);

		/*
		 * Guarantee the head ID is read before reading the tail ID.
		 * Since the tail ID is updated before the head ID, this
		 * guarantees that @id_prev_wrap is never ahead of the tail
		 * ID. This pairs with desc_reserve:D.
		 *
		 * Memory barrier involvement:
		 *
		 * If desc_reserve:A reads from desc_reserve:D, then
		 * desc_reserve:C reads from desc_push_tail:B.
		 *
		 * Relies on:
		 *
		 * MB from desc_push_tail:B to desc_reserve:D
		 *    matching
		 * RMB from desc_reserve:A to desc_reserve:C
		 *
		 * Note: desc_push_tail:B and desc_reserve:D can be different
		 *       CPUs. However, the desc_reserve:D CPU (which performs
		 *       the full memory barrier) must have previously seen
		 *       desc_push_tail:B.
		 */
		smp_rmb(); /* LMM(desc_reserve:B) */

		if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
						    )) { /* LMM(desc_reserve:C) */
			/*
			 * Make space for the new descriptor by
			 * advancing the tail.
			 */
			if (!desc_push_tail(rb, id_prev_wrap))
				return false;
		}

		/*
		 * 1. Guarantee the tail ID is read before validating the
		 *    recycled descriptor state. A read memory barrier is
		 *    sufficient for this. This pairs with desc_push_tail:B.
		 *
		 *    Memory barrier involvement:
		 *
		 *    If desc_reserve:C reads from desc_push_tail:B, then
		 *    desc_reserve:E reads from desc_make_reusable:A.
		 *
		 *    Relies on:
		 *
		 *    MB from desc_make_reusable:A to desc_push_tail:B
		 *       matching
		 *    RMB from desc_reserve:C to desc_reserve:E
		 *
		 *    Note: desc_make_reusable:A and desc_push_tail:B can be
		 *          different CPUs. However, the desc_push_tail:B CPU
		 *          (which performs the full memory barrier) must have
		 *          previously seen desc_make_reusable:A.
		 *
		 * 2. Guarantee the tail ID is stored before storing the head
		 *    ID. This pairs with desc_reserve:B.
		 *
		 * 3. Guarantee any data ring tail changes are stored before
		 *    recycling the descriptor. Data ring tail changes can
		 *    happen via desc_push_tail()->data_push_tail(). A full
		 *    memory barrier is needed since another CPU may have
		 *    pushed the data ring tails. This pairs with
		 *    data_push_tail:B.
		 *
		 * 4. Guarantee a new tail ID is stored before recycling the
		 *    descriptor. A full memory barrier is needed since
		 *    another CPU may have pushed the tail ID. This pairs
		 *    with desc_push_tail:C and this also pairs with
		 *    prb_first_seq:C.
		 *
		 * 5. Guarantee the head ID is stored before trying to
		 *    finalize the previous descriptor. This pairs with
		 *    _prb_commit:B.
		 */
	} while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
					  id)); /* LMM(desc_reserve:D) */

	desc = to_desc(desc_ring, id);

	/*
	 * If the descriptor has been recycled, verify the old state val.
	 * See "ABA Issues" about why this verification is performed.
	 */
	prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
	if (prev_state_val &&
	    get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
		WARN_ON_ONCE(1);
		return false;
	}

	/*
	 * Assign the descriptor a new ID and set its state to reserved.
	 * See "ABA Issues" about why cmpxchg() instead of set() is used.
	 *
	 * Guarantee the new descriptor ID and state is stored before making
	 * any other changes. A write memory barrier is sufficient for this.
	 * This pairs with desc_read:D.
	 */
	if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
				     DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
		WARN_ON_ONCE(1);
		return false;
	}

	/* Now data in @desc can be modified: LMM(desc_reserve:G) */

	*id_out = id;
	return true;
}

/* Determine the end of a data block. */
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
				   unsigned long lpos, unsigned int size)
{
	unsigned long begin_lpos;
	unsigned long next_lpos;

	begin_lpos = lpos;
	next_lpos = lpos + size;

	/* First check if the data block does not wrap. */
	if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
		return next_lpos;

	/* Wrapping data blocks store their data at the beginning. */
	return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
}

/*
 * Allocate a new data block, invalidating the oldest data block(s)
 * if necessary. This function also associates the data block with
 * a specified descriptor.
 */
static char *data_alloc(struct printk_ringbuffer *rb,
			struct prb_data_ring *data_ring, unsigned int size,
			struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
	struct prb_data_block *blk;
	unsigned long begin_lpos;
	unsigned long next_lpos;

	if (size == 0) {
		/* Specify a data-less block. */
		blk_lpos->begin = NO_LPOS;
		blk_lpos->next = NO_LPOS;
		return NULL;
	}

	size = to_blk_size(size);

	begin_lpos = atomic_long_read(&data_ring->head_lpos);

	do {
		next_lpos = get_next_lpos(data_ring, begin_lpos, size);

		if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
			/* Failed to allocate, specify a data-less block. */
			blk_lpos->begin = FAILED_LPOS;
			blk_lpos->next = FAILED_LPOS;
			return NULL;
		}

		/*
		 * 1. Guarantee any descriptor states that have transitioned
		 *    to reusable are stored before modifying the newly
		 *    allocated data area. A full memory barrier is needed
		 *    since other CPUs may have made the descriptor states
		 *    reusable. See data_push_tail:A about why the reusable
		 *    states are visible. This pairs with desc_read:D.
		 *
		 * 2. Guarantee any updated tail lpos is stored before
		 *    modifying the newly allocated data area. Another CPU may
		 *    be in data_make_reusable() and is reading a block ID
		 *    from this area. data_make_reusable() can handle reading
		 *    a garbage block ID value, but then it must be able to
		 *    load a new tail lpos. A full memory barrier is needed
		 *    since other CPUs may have updated the tail lpos. This
		 *    pairs with data_push_tail:B.
		 */
	} while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
					  next_lpos)); /* LMM(data_alloc:A) */

	blk = to_block(data_ring, begin_lpos);
	blk->id = id; /* LMM(data_alloc:B) */

	if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
		/* Wrapping data blocks store their data at the beginning. */
		blk = to_block(data_ring, 0);

		/*
		 * Store the ID on the wrapped block for consistency.
		 * The printk_ringbuffer does not actually use it.
		 */
		blk->id = id;
	}

	blk_lpos->begin = begin_lpos;
	blk_lpos->next = next_lpos;

	return &blk->data[0];
}

/*
 * Try to resize an existing data block associated with the descriptor
 * specified by @id. If the resized data block should become wrapped, it
 * copies the old data to the new data block. If @size yields a data block
 * of the same or smaller size, the data block is left as is.
 *
 * Fail if this is not the last allocated data block, if there is not
 * enough space, or if it is not possible to make enough space.
 *
 * Return a pointer to the beginning of the entire data buffer or NULL on
 * failure.
 */
static char *data_realloc(struct printk_ringbuffer *rb,
			  struct prb_data_ring *data_ring, unsigned int size,
			  struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
	struct prb_data_block *blk;
	unsigned long head_lpos;
	unsigned long next_lpos;
	bool wrapped;

	/* Reallocation only works if @blk_lpos is the newest data block. */
	head_lpos = atomic_long_read(&data_ring->head_lpos);
	if (head_lpos != blk_lpos->next)
		return NULL;

	/* Keep track if @blk_lpos was a wrapping data block. */
	wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));

	size = to_blk_size(size);

	next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);

	/* If the data block does not increase, there is nothing to do. */
	if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
		blk = to_block(data_ring, blk_lpos->begin);
		return &blk->data[0];
	}

	if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring)))
		return NULL;

	/* The memory barrier involvement is the same as data_alloc:A. */
	if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
				     next_lpos)) { /* LMM(data_realloc:A) */
		return NULL;
	}

	blk = to_block(data_ring, blk_lpos->begin);

	if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
		struct prb_data_block *old_blk = blk;

		/* Wrapping data blocks store their data at the beginning. */
		blk = to_block(data_ring, 0);

		/*
		 * Store the ID on the wrapped block for consistency.
		 * The printk_ringbuffer does not actually use it.
		 */
		blk->id = id;

		if (!wrapped) {
			/*
			 * Since the allocated space is now in the newly
			 * created wrapping data block, copy the content
			 * from the old data block.
			 */
			memcpy(&blk->data[0], &old_blk->data[0],
			       (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
		}
	}

	blk_lpos->next = next_lpos;

	return &blk->data[0];
}

/* Return the number of bytes used by a data block. */
static unsigned int space_used(struct prb_data_ring *data_ring,
			       struct prb_data_blk_lpos *blk_lpos)
{
	/* Data-less blocks take no space. */
	if (BLK_DATALESS(blk_lpos))
		return 0;

	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
		/* Data block does not wrap. */
		return (DATA_INDEX(data_ring, blk_lpos->next) -
			DATA_INDEX(data_ring, blk_lpos->begin));
	}

	/*
	 * For wrapping data blocks, the trailing (wasted) space is
	 * also counted.
	 */
	return (DATA_INDEX(data_ring, blk_lpos->next) +
		DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
}

/*
 * Given @blk_lpos, return a pointer to the writer data from the data block
 * and calculate the size of the data part. A NULL pointer is returned if
 * @blk_lpos specifies values that could never be legal.
 *
 * This function (used by readers) performs strict validation on the lpos
 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
 * triggered if an internal error is detected.
 */
static const char *get_data(struct prb_data_ring *data_ring,
			    struct prb_data_blk_lpos *blk_lpos,
			    unsigned int *data_size)
{
	struct prb_data_block *db;

	/* Data-less data block description. */
	if (BLK_DATALESS(blk_lpos)) {
		if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
			*data_size = 0;
			return "";
		}
		return NULL;
	}

	/* Regular data block: @begin less than @next and in same wrap. */
	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
	    blk_lpos->begin < blk_lpos->next) {
		db = to_block(data_ring, blk_lpos->begin);
		*data_size = blk_lpos->next - blk_lpos->begin;

	/* Wrapping data block: @begin is one wrap behind @next. */
	} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
		   DATA_WRAPS(data_ring, blk_lpos->next)) {
		db = to_block(data_ring, 0);
		*data_size = DATA_INDEX(data_ring, blk_lpos->next);

	/* Illegal block description. */
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	/* A valid data block will always be aligned to the ID size. */
	if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
	    WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
		return NULL;
	}

	/* A valid data block will always have at least an ID. */
	if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
		return NULL;

	/* Subtract block ID space from size to reflect data size. */
	*data_size -= sizeof(db->id);

	return &db->data[0];
}

/*
 * Attempt to transition the newest descriptor from committed back to reserved
 * so that the record can be modified by a writer again. This is only possible
 * if the descriptor is not yet finalized and the provided @caller_id matches.
 */
static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
					 u32 caller_id, unsigned long *id_out)
{
	unsigned long prev_state_val;
	enum desc_state d_state;
	struct prb_desc desc;
	struct prb_desc *d;
	unsigned long id;
	u32 cid;

	id = atomic_long_read(&desc_ring->head_id);

	/*
	 * To reduce unnecessary reopening, first check if the descriptor
	 * state and caller ID are correct.
	 */
	d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
	if (d_state != desc_committed || cid != caller_id)
		return NULL;

	d = to_desc(desc_ring, id);

	prev_state_val = DESC_SV(id, desc_committed);

	/*
	 * Guarantee the reserved state is stored before reading any
	 * record data. A full memory barrier is needed because @state_var
	 * modification is followed by reading. This pairs with _prb_commit:B.
	 *
	 * Memory barrier involvement:
	 *
	 * If desc_reopen_last:A reads from _prb_commit:B, then
	 * prb_reserve_in_last:A reads from _prb_commit:A.
	 *
	 * Relies on:
	 *
	 * WMB from _prb_commit:A to _prb_commit:B
	 *    matching
	 * MB from desc_reopen_last:A to prb_reserve_in_last:A
	 */
	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
				     DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
		return NULL;
	}

	*id_out = id;
	return d;
}

/**
 * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
 *                         used by the newest record.
 *
 * @e:         The entry structure to setup.
 * @rb:        The ringbuffer to re-reserve and extend data in.
 * @r:         The record structure to allocate buffers for.
 * @caller_id: The caller ID of the caller (reserving writer).
 *
 * This is the public function available to writers to re-reserve and extend
 * data.
 *
 * The writer specifies the text size to extend (not the new total size) by
 * setting the @text_buf_size field of @r. Extending dictionaries is not
 * supported, so @dict_buf_size of @r should be set to 0. To ensure proper
 * initialization of @r, prb_rec_init_wr() should be used.
 *
 * This function will fail if @caller_id does not match the caller ID of the
 * newest record. In that case the caller must reserve new data using
 * prb_reserve().
 *
 * Context: Any context. Disables local interrupts on success.
 * Return: true if text data could be extended, otherwise false.
 *
 * On success:
 *
 *   - @r->text_buf points to the beginning of the entire text buffer.
 *
 *   - @r->text_buf_size is set to the new total size of the buffer.
 *
 *   - @r->dict_buf and @r->dict_buf_size are cleared because extending
 *     the dict buffer is not supported.
 *
 *   - @r->info is not touched so that @r->info->text_len could be used
 *     to append the text.
 *
 *   - prb_record_text_space() can be used on @e to query the new
 *     actually used space.
 *
 * Important: All @r->info fields will already be set with the current values
 *            for the record. I.e. @r->info->text_len will be less than
 *            @text_buf_size and @r->info->dict_len may be set, even though
 *            @dict_buf_size is 0. Writers can use @r->info->text_len to know
 *            where concatenation begins and writers should update
 *            @r->info->text_len after concatenating.
 */
bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
			 struct printk_record *r, u32 caller_id)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct printk_info *info;
	unsigned int data_size;
	struct prb_desc *d;
	unsigned long id;

	local_irq_save(e->irqflags);

	/* Transition the newest descriptor back to the reserved state. */
	d = desc_reopen_last(desc_ring, caller_id, &id);
	if (!d) {
		local_irq_restore(e->irqflags);
		goto fail_reopen;
	}

	/* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */

	info = to_info(desc_ring, id);

	/*
	 * Set the @e fields here so that prb_commit() can be used if
	 * anything fails from now on.
	 */
	e->rb = rb;
	e->id = id;

	/*
	 * desc_reopen_last() checked the caller_id, but there was no
	 * exclusive access at that point. The descriptor may have
	 * changed since then.
	 */
	if (caller_id != info->caller_id)
		goto fail;

	if (BLK_DATALESS(&d->text_blk_lpos)) {
		if (WARN_ON_ONCE(info->text_len != 0)) {
			pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
				     info->text_len);
			info->text_len = 0;
		}

		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
			goto fail;

		r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
					 &d->text_blk_lpos, id);
	} else {
		if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
			goto fail;

		/*
		 * Increase the buffer size to include the original size. If
		 * the meta data (@text_len) is not sane, use the full data
		 * block size.
		 */
		if (WARN_ON_ONCE(info->text_len > data_size)) {
			pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
				     info->text_len, data_size);
			info->text_len = data_size;
		}
		r->text_buf_size += info->text_len;

		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
			goto fail;

		r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
					   &d->text_blk_lpos, id);
	}
	if (r->text_buf_size && !r->text_buf)
		goto fail;

	/* Although dictionary data may be in use, it cannot be extended. */
	r->dict_buf = NULL;
	r->dict_buf_size = 0;

	r->info = info;

	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);

	return true;
fail:
	prb_commit(e);
	/* prb_commit() re-enabled interrupts. */
fail_reopen:
	/* Make it clear to the caller that the re-reserve failed. */
	memset(r, 0, sizeof(*r));
	return false;
}

/*
 * Attempt to finalize a specified descriptor. If this fails, the descriptor
 * is either already final or it will finalize itself when the writer commits.
 */
static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
{
	unsigned long prev_state_val = DESC_SV(id, desc_committed);
	struct prb_desc *d = to_desc(desc_ring, id);

	atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
			DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
}

/**
 * prb_reserve() - Reserve space in the ringbuffer.
 *
 * @e:  The entry structure to setup.
 * @rb: The ringbuffer to reserve data in.
 * @r:  The record structure to allocate buffers for.
 *
 * This is the public function available to writers to reserve data.
 *
 * The writer specifies the text and dict sizes to reserve by setting the
 * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries
 * are optional, so @dict_buf_size is allowed to be 0. To ensure proper
 * initialization of @r, prb_rec_init_wr() should be used.
 *
 * Context: Any context. Disables local interrupts on success.
 * Return: true if at least text data could be allocated, otherwise false.
 *
 * On success, the fields @info, @text_buf, @dict_buf of @r will be set by
 * this function and should be filled in by the writer before committing. Also
 * on success, prb_record_text_space() can be used on @e to query the actual
 * space used for the text data block.
 *
 * If the function fails to reserve dictionary space (but all else succeeded),
 * it will still report success. In that case @dict_buf is set to NULL and
 * @dict_buf_size is set to 0. Writers must check this before writing to
 * dictionary space.
 *
 * Important: @info->text_len and @info->dict_len need to be set correctly by
 *            the writer in order for data to be readable and/or extended.
 *            Their values are initialized to 0.
 */
bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
		 struct printk_record *r)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct printk_info *info;
	struct prb_desc *d;
	unsigned long id;
	u64 seq;

	if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
		goto fail;

	if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size))
		goto fail;

	/*
	 * Descriptors in the reserved state act as blockers to all further
	 * reservations once the desc_ring has fully wrapped. Disable
	 * interrupts during the reserve/commit window in order to minimize
	 * the likelihood of this happening.
	 */
	local_irq_save(e->irqflags);

	if (!desc_reserve(rb, &id)) {
		/* Descriptor reservation failures are tracked. */
		atomic_long_inc(&rb->fail);
		local_irq_restore(e->irqflags);
		goto fail;
	}

	d = to_desc(desc_ring, id);
	info = to_info(desc_ring, id);

	/*
	 * All @info fields (except @seq) are cleared and must be filled in
	 * by the writer. Save @seq before clearing because it is used to
	 * determine the new sequence number.
	 */
	seq = info->seq;
	memset(info, 0, sizeof(*info));

	/*
	 * Set the @e fields here so that prb_commit() can be used if
	 * text data allocation fails.
	 */
	e->rb = rb;
	e->id = id;

	/*
	 * Initialize the sequence number if it has "never been set".
	 * Otherwise just increment it by a full wrap.
	 *
	 * @seq is considered "never been set" if it has a value of 0,
	 * _except_ for @infos[0], which was specially setup by the ringbuffer
	 * initializer and therefore is always considered as set.
	 *
	 * See the "Bootstrap" comment block in printk_ringbuffer.h for
	 * details about how the initializer bootstraps the descriptors.
	 */
	if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
		info->seq = DESC_INDEX(desc_ring, id);
	else
		info->seq = seq + DESCS_COUNT(desc_ring);

	/*
	 * New data is about to be reserved. Once that happens, previous
	 * descriptors are no longer able to be extended. Finalize the
	 * previous descriptor now so that it can be made available to
	 * readers. (For seq==0 there is no previous descriptor.)
	 */
	if (info->seq > 0)
		desc_make_final(desc_ring, DESC_ID(id - 1));

	r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
				 &d->text_blk_lpos, id);
	/* If text data allocation fails, a data-less record is committed. */
	if (r->text_buf_size && !r->text_buf) {
		prb_commit(e);
		/* prb_commit() re-enabled interrupts. */
		goto fail;
	}

	r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size,
				 &d->dict_blk_lpos, id);
	/*
	 * If dict data allocation fails, the caller can still commit
	 * text. But dictionary information will not be available.
	 */
	if (r->dict_buf_size && !r->dict_buf)
		r->dict_buf_size = 0;

	r->info = info;

	/* Record full text space used by record. */
	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);

	return true;
fail:
	/* Make it clear to the caller that the reserve failed. */
	memset(r, 0, sizeof(*r));
	return false;
}

/* Commit the data (possibly finalizing it) and restore interrupts. */
static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
{
	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
	struct prb_desc *d = to_desc(desc_ring, e->id);
	unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);

	/* Now the writer has finished all writing: LMM(_prb_commit:A) */

	/*
	 * Set the descriptor as committed. See "ABA Issues" about why
	 * cmpxchg() instead of set() is used.
	 *
	 * 1. Guarantee all record data is stored before the descriptor state
	 *    is stored as committed. A write memory barrier is sufficient
	 *    for this. This pairs with desc_read:B and desc_reopen_last:A.
	 *
	 * 2. Guarantee the descriptor state is stored as committed before
	 *    re-checking the head ID in order to possibly finalize this
	 *    descriptor. This pairs with desc_reserve:D.
	 *
	 *    Memory barrier involvement:
	 *
	 *    If prb_commit:A reads from desc_reserve:D, then
	 *    desc_make_final:A reads from _prb_commit:B.
	 *
	 *    Relies on:
	 *
	 *    MB _prb_commit:B to prb_commit:A
	 *       matching
	 *    MB desc_reserve:D to desc_make_final:A
	 */
	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
			DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
		WARN_ON_ONCE(1);
	}

	/* Restore interrupts, the reserve/commit window is finished. */
/* Commit the data (possibly finalizing it) and restore interrupts. */
static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
{
	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
	struct prb_desc *d = to_desc(desc_ring, e->id);
	unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);

	/* Now the writer has finished all writing: LMM(_prb_commit:A) */

	/*
	 * Set the descriptor as committed. See "ABA Issues" about why
	 * cmpxchg() instead of set() is used.
	 *
	 * 1. Guarantee all record data is stored before the descriptor state
	 *    is stored as committed. A write memory barrier is sufficient
	 *    for this. This pairs with desc_read:B and desc_reopen_last:A.
	 *
	 * 2. Guarantee the descriptor state is stored as committed before
	 *    re-checking the head ID in order to possibly finalize this
	 *    descriptor. This pairs with desc_reserve:D.
	 *
	 * Memory barrier involvement:
	 *
	 * If prb_commit:A reads from desc_reserve:D, then
	 * desc_make_final:A reads from _prb_commit:B.
	 *
	 * Relies on:
	 *
	 * MB _prb_commit:B to prb_commit:A
	 *    matching
	 * MB desc_reserve:D to desc_make_final:A
	 */
	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
			DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
		WARN_ON_ONCE(1);
	}

	/* Restore interrupts, the reserve/commit window is finished. */
	local_irq_restore(e->irqflags);
}

/**
 * prb_commit() - Commit (previously reserved) data to the ringbuffer.
 *
 * @e: The entry containing the reserved data information.
 *
 * This is the public function available to writers to commit data.
 *
 * Note that the data is not yet available to readers until it is finalized.
 * Finalizing happens automatically when space for the next record is
 * reserved.
 *
 * See prb_final_commit() for a version of this function that finalizes
 * immediately.
 *
 * Context: Any context. Enables local interrupts.
 */
void prb_commit(struct prb_reserved_entry *e)
{
	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
	unsigned long head_id;

	_prb_commit(e, desc_committed);

	/*
	 * If this descriptor is no longer the head (i.e. a new record has
	 * been allocated), extending the data for this record is no longer
	 * allowed and therefore it must be finalized.
	 */
	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
	if (head_id != e->id)
		desc_make_final(desc_ring, e->id);
}

/**
 * prb_final_commit() - Commit and finalize (previously reserved) data to
 *                      the ringbuffer.
 *
 * @e: The entry containing the reserved data information.
 *
 * This is the public function available to writers to commit+finalize data.
 *
 * By finalizing, the data is made immediately available to readers.
 *
 * This function should only be used if there are no intentions of extending
 * this data using prb_reserve_in_last().
 *
 * Context: Any context. Enables local interrupts.
 */
void prb_final_commit(struct prb_reserved_entry *e)
{
	_prb_commit(e, desc_finalized);
}
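/*
 * Illustration (not part of the original source): the only difference to
 * the writer sketch after prb_reserve() is the final call. A writer that
 * will never extend its record via prb_reserve_in_last() can make the
 * data readable immediately:
 *
 *	if (prb_reserve(&e, rb, &r)) {
 *		snprintf(&r.text_buf[0], r.text_buf_size, "%s", textstr);
 *		r.info->text_len = strlen(&r.text_buf[0]);
 *		prb_final_commit(&e);	// committed and finalized in one step
 *	}
 */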
/*
 * Count the number of lines in provided text. All text has at least 1 line
 * (even if @text_size is 0). Each '\n' processed is counted as an additional
 * line.
 */
static unsigned int count_lines(const char *text, unsigned int text_size)
{
	unsigned int next_size = text_size;
	unsigned int line_count = 1;
	const char *next = text;

	while (next_size) {
		next = memchr(next, '\n', next_size);
		if (!next)
			break;
		line_count++;
		next++;
		next_size = text_size - (next - text);
	}

	return line_count;
}

/*
 * Given @blk_lpos, copy an expected @len of data into the provided buffer.
 * If @line_count is provided, count the number of lines in the data.
 *
 * This function (used by readers) performs strict validation on the data
 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
 * triggered if an internal error is detected.
 */
static bool copy_data(struct prb_data_ring *data_ring,
		      struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
		      unsigned int buf_size, unsigned int *line_count)
{
	unsigned int data_size;
	const char *data;

	/* Caller might not want any data. */
	if ((!buf || !buf_size) && !line_count)
		return true;

	data = get_data(data_ring, blk_lpos, &data_size);
	if (!data)
		return false;

	/*
	 * Actual cannot be less than expected. It can be more than expected
	 * because of the trailing alignment padding.
	 *
	 * Note that invalid @len values can occur because the caller loads
	 * the value during an allowed data race.
	 */
	if (data_size < (unsigned int)len)
		return false;

	/* Caller interested in the line count? */
	if (line_count)
		*line_count = count_lines(data, data_size);

	/* Caller interested in the data content? */
	if (!buf || !buf_size)
		return true;

	data_size = min_t(u16, buf_size, len);

	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
	return true;
}
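/*
 * Worked illustration (not part of the original source): for the text
 * "line1\nline2" with @text_size 11, count_lines() returns 2. And if
 * copy_data() is called with @buf_size smaller than @len, the copy is
 * silently truncated to @buf_size bytes, so callers wanting the full
 * text must provide a buffer of at least @len bytes.
 */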
/*
 * This is an extended version of desc_read(). It gets a copy of a specified
 * descriptor. However, it also verifies that the record is finalized and has
 * the sequence number @seq. On success, 0 is returned.
 *
 * Error return values:
 * -EINVAL: A finalized record with sequence number @seq does not exist.
 * -ENOENT: A finalized record with sequence number @seq exists, but its data
 *          is not available. This is a valid record, so readers should
 *          continue with the next record.
 */
static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
				   unsigned long id, u64 seq,
				   struct prb_desc *desc_out)
{
	struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
	enum desc_state d_state;
	u64 s;

	d_state = desc_read(desc_ring, id, desc_out, &s, NULL);

	/*
	 * An unexpected @id (desc_miss) or @seq mismatch means the record
	 * does not exist. A descriptor in the reserved or committed state
	 * means the record does not yet exist for the reader.
	 */
	if (d_state == desc_miss ||
	    d_state == desc_reserved ||
	    d_state == desc_committed ||
	    s != seq) {
		return -EINVAL;
	}

	/*
	 * A descriptor in the reusable state may no longer have its data
	 * available; report it as existing but with lost data. Or the record
	 * may actually be a record with lost data.
	 */
	if (d_state == desc_reusable ||
	    (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
		return -ENOENT;
	}

	return 0;
}

/*
 * Copy the ringbuffer data from the record with @seq to the provided
 * @r buffer. On success, 0 is returned.
 *
 * See desc_read_finalized_seq() for error return values.
 */
static int prb_read(struct printk_ringbuffer *rb, u64 seq,
		    struct printk_record *r, unsigned int *line_count)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct printk_info *info = to_info(desc_ring, seq);
	struct prb_desc *rdesc = to_desc(desc_ring, seq);
	atomic_long_t *state_var = &rdesc->state_var;
	struct prb_desc desc;
	unsigned long id;
	int err;

	/* Extract the ID, used to specify the descriptor to read. */
	id = DESC_ID(atomic_long_read(state_var));

	/* Get a local copy of the correct descriptor (if available). */
	err = desc_read_finalized_seq(desc_ring, id, seq, &desc);

	/*
	 * If @r is NULL, the caller is only interested in the availability
	 * of the record.
	 */
	if (err || !r)
		return err;

	/* If requested, copy meta data. */
	if (r->info)
		memcpy(r->info, info, sizeof(*(r->info)));

	/* Copy text data. If it fails, this is a data-less record. */
	if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
		       r->text_buf, r->text_buf_size, line_count)) {
		return -ENOENT;
	}

	/*
	 * Copy dict data. Although this should not fail, dict data is not
	 * important. So if it fails, modify the copied meta data to report
	 * that there is no dict data, silently dropping it.
	 */
	if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, info->dict_len,
		       r->dict_buf, r->dict_buf_size, NULL)) {
		if (r->info)
			r->info->dict_len = 0;
	}

	/* Ensure the record is still finalized and has the same @seq. */
	return desc_read_finalized_seq(desc_ring, id, seq, &desc);
}

/* Get the sequence number of the tail descriptor. */
static u64 prb_first_seq(struct printk_ringbuffer *rb)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	enum desc_state d_state;
	struct prb_desc desc;
	unsigned long id;
	u64 seq;

	for (;;) {
		id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */

		d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */

		/*
		 * This loop will not be infinite because the tail is
		 * _always_ in the finalized or reusable state.
		 */
		if (d_state == desc_finalized || d_state == desc_reusable)
			break;

		/*
		 * Guarantee the last state load from desc_read() is before
		 * reloading @tail_id in order to see a new tail in the case
		 * that the descriptor has been recycled. This pairs with
		 * desc_reserve:D.
		 *
		 * Memory barrier involvement:
		 *
		 * If prb_first_seq:B reads from desc_reserve:F, then
		 * prb_first_seq:A reads from desc_push_tail:B.
		 *
		 * Relies on:
		 *
		 * MB from desc_push_tail:B to desc_reserve:F
		 *    matching
		 * RMB prb_first_seq:B to prb_first_seq:A
		 */
		smp_rmb(); /* LMM(prb_first_seq:C) */
	}

	return seq;
}

/*
 * Non-blocking read of a record. Updates @seq to the last finalized record
 * (which may have no data available).
 *
 * See the description of prb_read_valid() and prb_read_valid_info()
 * for details.
 */
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
			    struct printk_record *r, unsigned int *line_count)
{
	u64 tail_seq;
	int err;

	while ((err = prb_read(rb, *seq, r, line_count))) {
		tail_seq = prb_first_seq(rb);

		if (*seq < tail_seq) {
			/*
			 * Behind the tail. Catch up and try again. This
			 * can happen for -ENOENT and -EINVAL cases.
			 */
			*seq = tail_seq;

		} else if (err == -ENOENT) {
			/* Record exists, but no data available. Skip. */
			(*seq)++;

		} else {
			/* Non-existent/non-finalized record. Must stop. */
			return false;
		}
	}

	return true;
}
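/*
 * Worked illustration (not part of the original source): suppose a reader
 * requests seq=5, but writers have since recycled that record so the tail
 * is now at seq=12. prb_read() fails, prb_first_seq() reports 12 and the
 * loop retries with *seq=12. If record 12 is finalized but its text was
 * lost (-ENOENT), *seq is bumped to 13 and the loop retries once more, so
 * the caller always ends up at the oldest record that can still be read.
 */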
/**
 * prb_read_valid() - Non-blocking read of a requested record or (if gone)
 *                    the next available record.
 *
 * @rb:  The ringbuffer to read from.
 * @seq: The sequence number of the record to read.
 * @r:   A record data buffer to store the read record to.
 *
 * This is the public function available to readers to read a record.
 *
 * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be
 * filled in. Any of the buffer pointers can be set to NULL if the reader
 * is not interested in that data. To ensure proper initialization of @r,
 * prb_rec_init_rd() should be used.
 *
 * Context: Any context.
 * Return: true if a record was read, otherwise false.
 *
 * On success, the reader must check r->info->seq to see which record was
 * actually read. This allows the reader to detect dropped records.
 *
 * Failure means @seq refers to a not yet written record.
 */
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
		    struct printk_record *r)
{
	return _prb_read_valid(rb, &seq, r, NULL);
}

/**
 * prb_read_valid_info() - Non-blocking read of meta data for a requested
 *                         record or (if gone) the next available record.
 *
 * @rb:         The ringbuffer to read from.
 * @seq:        The sequence number of the record to read.
 * @info:       A buffer to store the read record meta data to.
 * @line_count: A buffer to store the number of lines in the record text.
 *
 * This is the public function available to readers to read only the
 * meta data of a record.
 *
 * The reader provides the @info, @line_count buffers to be filled in.
 * Either of the buffer pointers can be set to NULL if the reader is not
 * interested in that data.
 *
 * Context: Any context.
 * Return: true if a record's meta data was read, otherwise false.
 *
 * On success, the reader must check info->seq to see which record meta data
 * was actually read. This allows the reader to detect dropped records.
 *
 * Failure means @seq refers to a not yet written record.
 */
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
			 struct printk_info *info, unsigned int *line_count)
{
	struct printk_record r;

	prb_rec_init_rd(&r, info, NULL, 0, NULL, 0);

	return _prb_read_valid(rb, &seq, &r, line_count);
}

/**
 * prb_first_valid_seq() - Get the sequence number of the oldest available
 *                         record.
 *
 * @rb: The ringbuffer to get the sequence number from.
 *
 * This is the public function available to readers to see what the
 * first/oldest valid sequence number is.
 *
 * This provides readers a starting point to begin iterating the ringbuffer.
 *
 * Context: Any context.
 * Return: The sequence number of the first/oldest record or, if the
 *         ringbuffer is empty, 0.
 */
u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
{
	u64 seq = 0;

	if (!_prb_read_valid(rb, &seq, NULL, NULL))
		return 0;

	return seq;
}

/**
 * prb_next_seq() - Get the sequence number after the last available record.
 *
 * @rb: The ringbuffer to get the sequence number from.
 *
 * This is the public function available to readers to see what the next
 * newest sequence number available to readers will be.
 *
 * This provides readers a sequence number to jump to if all currently
 * available records should be skipped.
 *
 * Context: Any context.
 * Return: The sequence number of the next newest (not yet available) record
 *         for readers.
 */
u64 prb_next_seq(struct printk_ringbuffer *rb)
{
	u64 seq = 0;

	/* Search forward from the oldest descriptor. */
	while (_prb_read_valid(rb, &seq, NULL, NULL))
		seq++;

	return seq;
}
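/*
 * Minimal reader sketch (illustration only, not part of the original
 * source). It assumes @rb points to an initialized ringbuffer and
 * iterates all currently available records:
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[200];
 *	u64 seq;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), NULL, 0);
 *
 *	for (seq = prb_first_valid_seq(rb); prb_read_valid(rb, seq, &r);
 *	     seq = r.info->seq + 1) {
 *		// r.info->seq may be greater than the requested seq if
 *		// records were dropped in the meantime
 *		pr_info("%llu: %.*s\n", r.info->seq,
 *			(int)r.info->text_len, &r.text_buf[0]);
 *	}
 */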
/**
 * prb_init() - Initialize a ringbuffer to use provided external buffers.
 *
 * @rb:       The ringbuffer to initialize.
 * @text_buf: The data buffer for text data.
 * @textbits: The size of @text_buf as a power-of-2 value.
 * @dict_buf: The data buffer for dictionary data.
 * @dictbits: The size of @dict_buf as a power-of-2 value.
 * @descs:    The descriptor buffer for ringbuffer records.
 * @descbits: The count of @descs items as a power-of-2 value.
 * @infos:    The printk_info buffer for ringbuffer records.
 *
 * This is the public function available to writers to set up a ringbuffer
 * during runtime using provided buffers.
 *
 * This must match the initialization of DEFINE_PRINTKRB().
 *
 * Context: Any context.
 */
void prb_init(struct printk_ringbuffer *rb,
	      char *text_buf, unsigned int textbits,
	      char *dict_buf, unsigned int dictbits,
	      struct prb_desc *descs, unsigned int descbits,
	      struct printk_info *infos)
{
	memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
	memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));

	rb->desc_ring.count_bits = descbits;
	rb->desc_ring.descs = descs;
	rb->desc_ring.infos = infos;
	atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
	atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));

	rb->text_data_ring.size_bits = textbits;
	rb->text_data_ring.data = text_buf;
	atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
	atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));

	rb->dict_data_ring.size_bits = dictbits;
	rb->dict_data_ring.data = dict_buf;
	atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits));
	atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits));

	atomic_long_set(&rb->fail, 0);

	atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
	descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS;
	descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS;

	infos[0].seq = -(u64)_DESCS_COUNT(descbits);
	infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}

/**
 * prb_record_text_space() - Query the full actual used ringbuffer space for
 *                           the text data of a reserved entry.
 *
 * @e: The successfully reserved entry to query.
 *
 * This is the public function available to writers to see how much actual
 * space is used in the ringbuffer to store the text data of the specified
 * entry.
 *
 * This function is only valid if @e has been successfully reserved using
 * prb_reserve().
 *
 * Context: Any context.
 * Return: The size in bytes used by the text data of the associated record.
 */
unsigned int prb_record_text_space(struct prb_reserved_entry *e)
{
	return e->text_space;
}
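/*
 * Runtime initialization sketch (illustration only, not part of the
 * original source; all names are hypothetical). The size arguments are
 * power-of-2 exponents, so the example below sets up 4 KiB text and
 * dictionary buffers and 32 descriptors:
 *
 *	static char text_buf[1 << 12];
 *	static char dict_buf[1 << 12];
 *	static struct prb_desc descs[1 << 5];
 *	static struct printk_info infos[1 << 5];
 *	static struct printk_ringbuffer rb;
 *
 *	prb_init(&rb, &text_buf[0], 12, &dict_buf[0], 12,
 *		 &descs[0], 5, &infos[0]);
 */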