1b6cf8b3fSJohn Ogness // SPDX-License-Identifier: GPL-2.0 2b6cf8b3fSJohn Ogness 3b6cf8b3fSJohn Ogness #include <linux/kernel.h> 4b6cf8b3fSJohn Ogness #include <linux/irqflags.h> 5b6cf8b3fSJohn Ogness #include <linux/string.h> 6b6cf8b3fSJohn Ogness #include <linux/errno.h> 7b6cf8b3fSJohn Ogness #include <linux/bug.h> 8b6cf8b3fSJohn Ogness #include "printk_ringbuffer.h" 9b6cf8b3fSJohn Ogness 10b6cf8b3fSJohn Ogness /** 11b6cf8b3fSJohn Ogness * DOC: printk_ringbuffer overview 12b6cf8b3fSJohn Ogness * 13b6cf8b3fSJohn Ogness * Data Structure 14b6cf8b3fSJohn Ogness * -------------- 15b6cf8b3fSJohn Ogness * The printk_ringbuffer is made up of 2 internal ringbuffers: 16b6cf8b3fSJohn Ogness * 17b6cf8b3fSJohn Ogness * desc_ring 18cfe2790bSJohn Ogness * A ring of descriptors and their meta data (such as sequence number, 19cfe2790bSJohn Ogness * timestamp, loglevel, etc.) as well as internal state information about 20cfe2790bSJohn Ogness * the record and logical positions specifying where in the other 21f35efc78SJohn Ogness * ringbuffer the text strings are located. 22b6cf8b3fSJohn Ogness * 23b6cf8b3fSJohn Ogness * text_data_ring 24b6cf8b3fSJohn Ogness * A ring of data blocks. A data block consists of an unsigned long 25b6cf8b3fSJohn Ogness * integer (ID) that maps to a desc_ring index followed by the text 26b6cf8b3fSJohn Ogness * string of the record. 27b6cf8b3fSJohn Ogness * 28b6cf8b3fSJohn Ogness * The internal state information of a descriptor is the key element to allow 29b6cf8b3fSJohn Ogness * readers and writers to locklessly synchronize access to the data. 30b6cf8b3fSJohn Ogness * 31b6cf8b3fSJohn Ogness * Implementation 32b6cf8b3fSJohn Ogness * -------------- 33b6cf8b3fSJohn Ogness * 34b6cf8b3fSJohn Ogness * Descriptor Ring 35b6cf8b3fSJohn Ogness * ~~~~~~~~~~~~~~~ 36cfe2790bSJohn Ogness * The descriptor ring is an array of descriptors. A descriptor contains 37cfe2790bSJohn Ogness * essential meta data to track the data of a printk record using 38f35efc78SJohn Ogness * blk_lpos structs pointing to associated text data blocks (see 39f35efc78SJohn Ogness * "Data Ring" below). Each descriptor is assigned an ID that maps 40cfe2790bSJohn Ogness * directly to index values of the descriptor array and has a state. The ID 41cfe2790bSJohn Ogness * and the state are bitwise combined into a single descriptor field named 42cfe2790bSJohn Ogness * @state_var, allowing ID and state to be synchronously and atomically 43cfe2790bSJohn Ogness * updated. 44b6cf8b3fSJohn Ogness * 454cfc7258SJohn Ogness * Descriptors have four states: 46b6cf8b3fSJohn Ogness * 47b6cf8b3fSJohn Ogness * reserved 48b6cf8b3fSJohn Ogness * A writer is modifying the record. 49b6cf8b3fSJohn Ogness * 50b6cf8b3fSJohn Ogness * committed 514cfc7258SJohn Ogness * The record and all its data are written. A writer can reopen the 524cfc7258SJohn Ogness * descriptor (transitioning it back to reserved), but in the committed 534cfc7258SJohn Ogness * state the data is consistent. 544cfc7258SJohn Ogness * 554cfc7258SJohn Ogness * finalized 564cfc7258SJohn Ogness * The record and all its data are complete and available for reading. A 574cfc7258SJohn Ogness * writer cannot reopen the descriptor. 58b6cf8b3fSJohn Ogness * 59b6cf8b3fSJohn Ogness * reusable 60f35efc78SJohn Ogness * The record exists, but its text and/or meta data may no longer be 61f35efc78SJohn Ogness * available. 62b6cf8b3fSJohn Ogness * 63b6cf8b3fSJohn Ogness * Querying the @state_var of a record requires providing the ID of the 644cfc7258SJohn Ogness * descriptor to query.
This can yield a possible fifth (pseudo) state: 65b6cf8b3fSJohn Ogness * 66b6cf8b3fSJohn Ogness * miss 67b6cf8b3fSJohn Ogness * The descriptor being queried has an unexpected ID. 68b6cf8b3fSJohn Ogness * 69b6cf8b3fSJohn Ogness * The descriptor ring has a @tail_id that contains the ID of the oldest 70b6cf8b3fSJohn Ogness * descriptor and @head_id that contains the ID of the newest descriptor. 71b6cf8b3fSJohn Ogness * 72b6cf8b3fSJohn Ogness * When a new descriptor should be created (and the ring is full), the tail 73b6cf8b3fSJohn Ogness * descriptor is invalidated by first transitioning to the reusable state and 74b6cf8b3fSJohn Ogness * then invalidating all tail data blocks up to and including the data blocks 75f35efc78SJohn Ogness * associated with the tail descriptor (for the text ring). Then 76b6cf8b3fSJohn Ogness * @tail_id is advanced, followed by advancing @head_id. And finally the 77b6cf8b3fSJohn Ogness * @state_var of the new descriptor is initialized to the new ID and reserved 78b6cf8b3fSJohn Ogness * state. 79b6cf8b3fSJohn Ogness * 80b6cf8b3fSJohn Ogness * The @tail_id can only be advanced if the new @tail_id would be in the 81b6cf8b3fSJohn Ogness * committed or reusable queried state. This makes it possible that a valid 82b6cf8b3fSJohn Ogness * sequence number of the tail is always available. 83b6cf8b3fSJohn Ogness * 844cfc7258SJohn Ogness * Descriptor Finalization 854cfc7258SJohn Ogness * ~~~~~~~~~~~~~~~~~~~~~~~ 864cfc7258SJohn Ogness * When a writer calls the commit function prb_commit(), record data is 874cfc7258SJohn Ogness * fully stored and is consistent within the ringbuffer. However, a writer can 884cfc7258SJohn Ogness * reopen that record, claiming exclusive access (as with prb_reserve()), and 894cfc7258SJohn Ogness * modify that record. When finished, the writer must again commit the record. 904cfc7258SJohn Ogness * 914cfc7258SJohn Ogness * In order for a record to be made available to readers (and also become 924cfc7258SJohn Ogness * recyclable for writers), it must be finalized. A finalized record cannot be 934cfc7258SJohn Ogness * reopened and can never become "unfinalized". Record finalization can occur 944cfc7258SJohn Ogness * in three different scenarios: 954cfc7258SJohn Ogness * 964cfc7258SJohn Ogness * 1) A writer can simultaneously commit and finalize its record by calling 974cfc7258SJohn Ogness * prb_final_commit() instead of prb_commit(). 984cfc7258SJohn Ogness * 994cfc7258SJohn Ogness * 2) When a new record is reserved and the previous record has been 1004cfc7258SJohn Ogness * committed via prb_commit(), that previous record is automatically 1014cfc7258SJohn Ogness * finalized. 1024cfc7258SJohn Ogness * 1034cfc7258SJohn Ogness * 3) When a record is committed via prb_commit() and a newer record 1044cfc7258SJohn Ogness * already exists, the record being committed is automatically finalized. 1054cfc7258SJohn Ogness * 106f35efc78SJohn Ogness * Data Ring 107f35efc78SJohn Ogness * ~~~~~~~~~ 108f35efc78SJohn Ogness * The text data ring is a byte array composed of data blocks. Data blocks are 109b6cf8b3fSJohn Ogness * referenced by blk_lpos structs that point to the logical position of the 110b6cf8b3fSJohn Ogness * beginning of a data block and the beginning of the next adjacent data 111b6cf8b3fSJohn Ogness * block. Logical positions are mapped directly to index values of the byte 112b6cf8b3fSJohn Ogness * array ringbuffer. 113b6cf8b3fSJohn Ogness * 114b6cf8b3fSJohn Ogness * Each data block consists of an ID followed by the writer data. 
The ID is 115b6cf8b3fSJohn Ogness * the identifier of a descriptor that is associated with the data block. A 116b6cf8b3fSJohn Ogness * given data block is considered valid if all of the following conditions 117b6cf8b3fSJohn Ogness * are met: 118b6cf8b3fSJohn Ogness * 119b6cf8b3fSJohn Ogness * 1) The descriptor associated with the data block is in the committed 1204cfc7258SJohn Ogness * or finalized queried state. 121b6cf8b3fSJohn Ogness * 122b6cf8b3fSJohn Ogness * 2) The blk_lpos struct within the descriptor associated with the data 123b6cf8b3fSJohn Ogness * block references back to the same data block. 124b6cf8b3fSJohn Ogness * 125b6cf8b3fSJohn Ogness * 3) The data block is within the head/tail logical position range. 126b6cf8b3fSJohn Ogness * 127b6cf8b3fSJohn Ogness * If the writer data of a data block would extend beyond the end of the 128b6cf8b3fSJohn Ogness * byte array, only the ID of the data block is stored at the logical 129b6cf8b3fSJohn Ogness * position and the full data block (ID and writer data) is stored at the 130b6cf8b3fSJohn Ogness * beginning of the byte array. The referencing blk_lpos will point to the 131b6cf8b3fSJohn Ogness * ID before the wrap and the next data block will be at the logical 132b6cf8b3fSJohn Ogness * position adjacent the full data block after the wrap. 133b6cf8b3fSJohn Ogness * 134b6cf8b3fSJohn Ogness * Data rings have a @tail_lpos that points to the beginning of the oldest 135b6cf8b3fSJohn Ogness * data block and a @head_lpos that points to the logical position of the 136b6cf8b3fSJohn Ogness * next (not yet existing) data block. 137b6cf8b3fSJohn Ogness * 138b6cf8b3fSJohn Ogness * When a new data block should be created (and the ring is full), tail data 139b6cf8b3fSJohn Ogness * blocks will first be invalidated by putting their associated descriptors 140b6cf8b3fSJohn Ogness * into the reusable state and then pushing the @tail_lpos forward beyond 141b6cf8b3fSJohn Ogness * them. Then the @head_lpos is pushed forward and is associated with a new 142b6cf8b3fSJohn Ogness * descriptor. If a data block is not valid, the @tail_lpos cannot be 143b6cf8b3fSJohn Ogness * advanced beyond it. 144b6cf8b3fSJohn Ogness * 145cfe2790bSJohn Ogness * Info Array 146cfe2790bSJohn Ogness * ~~~~~~~~~~ 147cfe2790bSJohn Ogness * The general meta data of printk records are stored in printk_info structs, 148cfe2790bSJohn Ogness * stored in an array with the same number of elements as the descriptor ring. 149cfe2790bSJohn Ogness * Each info corresponds to the descriptor of the same index in the 150cfe2790bSJohn Ogness * descriptor ring. Info validity is confirmed by evaluating the corresponding 151cfe2790bSJohn Ogness * descriptor before and after loading the info. 152cfe2790bSJohn Ogness * 153b6cf8b3fSJohn Ogness * Usage 154b6cf8b3fSJohn Ogness * ----- 155b6cf8b3fSJohn Ogness * Here are some simple examples demonstrating writers and readers. For the 156b6cf8b3fSJohn Ogness * examples a global ringbuffer (test_rb) is available (which is not the 157b6cf8b3fSJohn Ogness * actual ringbuffer used by printk):: 158b6cf8b3fSJohn Ogness * 159f35efc78SJohn Ogness * DEFINE_PRINTKRB(test_rb, 15, 5); 160b6cf8b3fSJohn Ogness * 161b6cf8b3fSJohn Ogness * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of 162f35efc78SJohn Ogness * 1 MiB (2 ^ (15 + 5)) for text data. 
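 *
 * As a second, purely illustrative sizing sketch following the same
 * formula (the name tiny_rb is hypothetical and not used by printk)::
 *
 *	DEFINE_PRINTKRB(tiny_rb, 5, 6);
 *
 * Such a ringbuffer would allow up to 32 records (2 ^ 5) and have a size
 * of 2 KiB (2 ^ (5 + 6)) for text data.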
163b6cf8b3fSJohn Ogness * 164b6cf8b3fSJohn Ogness * Sample writer code:: 165b6cf8b3fSJohn Ogness * 166b6cf8b3fSJohn Ogness * const char *textstr = "message text"; 167b6cf8b3fSJohn Ogness * struct prb_reserved_entry e; 168b6cf8b3fSJohn Ogness * struct printk_record r; 169b6cf8b3fSJohn Ogness * 170b6cf8b3fSJohn Ogness * // specify how much to allocate 171f35efc78SJohn Ogness * prb_rec_init_wr(&r, strlen(textstr) + 1); 172b6cf8b3fSJohn Ogness * 173b6cf8b3fSJohn Ogness * if (prb_reserve(&e, &test_rb, &r)) { 174b6cf8b3fSJohn Ogness * snprintf(r.text_buf, r.text_buf_size, "%s", textstr); 175f35efc78SJohn Ogness * 176cc5c7041SJohn Ogness * r.info->text_len = strlen(textstr); 177b6cf8b3fSJohn Ogness * r.info->ts_nsec = local_clock(); 178f35efc78SJohn Ogness * r.info->caller_id = printk_caller_id(); 179b6cf8b3fSJohn Ogness * 180f35efc78SJohn Ogness * // commit and finalize the record 1814cfc7258SJohn Ogness * prb_final_commit(&e); 1824cfc7258SJohn Ogness * } 1834cfc7258SJohn Ogness * 1844cfc7258SJohn Ogness * Note that additional writer functions are available to extend a record 1854cfc7258SJohn Ogness * after it has been committed but not yet finalized. This can be done as 1864cfc7258SJohn Ogness * long as no new records have been reserved and the caller is the same. 1874cfc7258SJohn Ogness * 1884cfc7258SJohn Ogness * Sample writer code (record extending):: 1894cfc7258SJohn Ogness * 1904cfc7258SJohn Ogness * // alternate rest of previous example 191f35efc78SJohn Ogness * 1924cfc7258SJohn Ogness * r.info->text_len = strlen(textstr); 193f35efc78SJohn Ogness * r.info->ts_nsec = local_clock(); 1944cfc7258SJohn Ogness * r.info->caller_id = printk_caller_id(); 1954cfc7258SJohn Ogness * 1964cfc7258SJohn Ogness * // commit the record (but do not finalize yet) 197b6cf8b3fSJohn Ogness * prb_commit(&e); 198b6cf8b3fSJohn Ogness * } 199b6cf8b3fSJohn Ogness * 2004cfc7258SJohn Ogness * ... 
2014cfc7258SJohn Ogness * 2024cfc7258SJohn Ogness * // specify additional 5 bytes text space to extend 203f35efc78SJohn Ogness * prb_rec_init_wr(&r, 5); 2044cfc7258SJohn Ogness * 20559f8bccaSJohn Ogness * // try to extend, but only if it does not exceed 32 bytes 20659f8bccaSJohn Ogness * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id(), 32)) { 2074cfc7258SJohn Ogness * snprintf(&r.text_buf[r.info->text_len], 2084cfc7258SJohn Ogness * r.text_buf_size - r.info->text_len, "hello"); 2094cfc7258SJohn Ogness * 2104cfc7258SJohn Ogness * r.info->text_len += 5; 2114cfc7258SJohn Ogness * 212f35efc78SJohn Ogness * // commit and finalize the record 2134cfc7258SJohn Ogness * prb_final_commit(&e); 2144cfc7258SJohn Ogness * } 2154cfc7258SJohn Ogness * 216b6cf8b3fSJohn Ogness * Sample reader code:: 217b6cf8b3fSJohn Ogness * 218b6cf8b3fSJohn Ogness * struct printk_info info; 219b6cf8b3fSJohn Ogness * struct printk_record r; 220b6cf8b3fSJohn Ogness * char text_buf[32]; 221b6cf8b3fSJohn Ogness * u64 seq; 222b6cf8b3fSJohn Ogness * 223f35efc78SJohn Ogness * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf)); 224b6cf8b3fSJohn Ogness * 225b6cf8b3fSJohn Ogness * prb_for_each_record(0, &test_rb, &seq, &r) { 226b6cf8b3fSJohn Ogness * if (info.seq != seq) 227b6cf8b3fSJohn Ogness * pr_warn("lost %llu records\n", info.seq - seq); 228b6cf8b3fSJohn Ogness * 229b6cf8b3fSJohn Ogness * if (info.text_len > r.text_buf_size) { 230b6cf8b3fSJohn Ogness * pr_warn("record %llu text truncated\n", info.seq); 231b6cf8b3fSJohn Ogness * text_buf[r.text_buf_size - 1] = 0; 232b6cf8b3fSJohn Ogness * } 233b6cf8b3fSJohn Ogness * 234f35efc78SJohn Ogness * pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec, 235f35efc78SJohn Ogness * &text_buf[0]); 236b6cf8b3fSJohn Ogness * } 237b6cf8b3fSJohn Ogness * 238b6cf8b3fSJohn Ogness * Note that additional less convenient reader functions are available to 239b6cf8b3fSJohn Ogness * allow complex record access. 240b6cf8b3fSJohn Ogness * 241b6cf8b3fSJohn Ogness * ABA Issues 242b6cf8b3fSJohn Ogness * ~~~~~~~~~~ 243b6cf8b3fSJohn Ogness * To help avoid ABA issues, descriptors are referenced by IDs (array index 244b6cf8b3fSJohn Ogness * values combined with tagged bits counting array wraps) and data blocks are 245b6cf8b3fSJohn Ogness * referenced by logical positions (array index values combined with tagged 246b6cf8b3fSJohn Ogness * bits counting array wraps). However, on 32-bit systems the number of 247b6cf8b3fSJohn Ogness * tagged bits is relatively small such that an ABA incident is (at least 248b6cf8b3fSJohn Ogness * theoretically) possible. For example, if 4 million maximally sized (1KiB) 249b6cf8b3fSJohn Ogness * printk messages were to occur in NMI context on a 32-bit system, the 250b6cf8b3fSJohn Ogness * interrupted context would not be able to recognize that the 32-bit integer 251b6cf8b3fSJohn Ogness * completely wrapped and thus represents a different data block than the one 252b6cf8b3fSJohn Ogness * the interrupted context expects. 253b6cf8b3fSJohn Ogness * 254b6cf8b3fSJohn Ogness * To help combat this possibility, additional state checking is performed 255b6cf8b3fSJohn Ogness * (such as using cmpxchg() even though set() would suffice). These extra 256b6cf8b3fSJohn Ogness * checks are commented as such and will hopefully catch any ABA issue that 257b6cf8b3fSJohn Ogness * a 32-bit system might experience. 258b6cf8b3fSJohn Ogness * 259b6cf8b3fSJohn Ogness * Memory Barriers 260b6cf8b3fSJohn Ogness * ~~~~~~~~~~~~~~~ 261b6cf8b3fSJohn Ogness * Multiple memory barriers are used.
To simplify proving correctness and 262b6cf8b3fSJohn Ogness * generating litmus tests, lines of code related to memory barriers 263b6cf8b3fSJohn Ogness * (loads, stores, and the associated memory barriers) are labeled:: 264b6cf8b3fSJohn Ogness * 265b6cf8b3fSJohn Ogness * LMM(function:letter) 266b6cf8b3fSJohn Ogness * 267b6cf8b3fSJohn Ogness * Comments reference the labels using only the "function:letter" part. 268b6cf8b3fSJohn Ogness * 269b6cf8b3fSJohn Ogness * The memory barrier pairs and their ordering are: 270b6cf8b3fSJohn Ogness * 271b6cf8b3fSJohn Ogness * desc_reserve:D / desc_reserve:B 272b6cf8b3fSJohn Ogness * push descriptor tail (id), then push descriptor head (id) 273b6cf8b3fSJohn Ogness * 274b6cf8b3fSJohn Ogness * desc_reserve:D / data_push_tail:B 275b6cf8b3fSJohn Ogness * push data tail (lpos), then set new descriptor reserved (state) 276b6cf8b3fSJohn Ogness * 277b6cf8b3fSJohn Ogness * desc_reserve:D / desc_push_tail:C 278b6cf8b3fSJohn Ogness * push descriptor tail (id), then set new descriptor reserved (state) 279b6cf8b3fSJohn Ogness * 280b6cf8b3fSJohn Ogness * desc_reserve:D / prb_first_seq:C 281b6cf8b3fSJohn Ogness * push descriptor tail (id), then set new descriptor reserved (state) 282b6cf8b3fSJohn Ogness * 283b6cf8b3fSJohn Ogness * desc_reserve:F / desc_read:D 284b6cf8b3fSJohn Ogness * set new descriptor id and reserved (state), then allow writer changes 285b6cf8b3fSJohn Ogness * 2864cfc7258SJohn Ogness * data_alloc:A (or data_realloc:A) / desc_read:D 287b6cf8b3fSJohn Ogness * set old descriptor reusable (state), then modify new data block area 288b6cf8b3fSJohn Ogness * 2894cfc7258SJohn Ogness * data_alloc:A (or data_realloc:A) / data_push_tail:B 290b6cf8b3fSJohn Ogness * push data tail (lpos), then modify new data block area 291b6cf8b3fSJohn Ogness * 2924cfc7258SJohn Ogness * _prb_commit:B / desc_read:B 293b6cf8b3fSJohn Ogness * store writer changes, then set new descriptor committed (state) 294b6cf8b3fSJohn Ogness * 2954cfc7258SJohn Ogness * desc_reopen_last:A / _prb_commit:B 2964cfc7258SJohn Ogness * set descriptor reserved (state), then read descriptor data 2974cfc7258SJohn Ogness * 2984cfc7258SJohn Ogness * _prb_commit:B / desc_reserve:D 2994cfc7258SJohn Ogness * set new descriptor committed (state), then check descriptor head (id) 3004cfc7258SJohn Ogness * 301b6cf8b3fSJohn Ogness * data_push_tail:D / data_push_tail:A 302b6cf8b3fSJohn Ogness * set descriptor reusable (state), then push data tail (lpos) 303b6cf8b3fSJohn Ogness * 304b6cf8b3fSJohn Ogness * desc_push_tail:B / desc_reserve:D 305b6cf8b3fSJohn Ogness * set descriptor reusable (state), then push descriptor tail (id) 306b6cf8b3fSJohn Ogness */ 307b6cf8b3fSJohn Ogness 308b6cf8b3fSJohn Ogness #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits) 309b6cf8b3fSJohn Ogness #define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1) 310b6cf8b3fSJohn Ogness 311b6cf8b3fSJohn Ogness #define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits) 312b6cf8b3fSJohn Ogness #define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1) 313b6cf8b3fSJohn Ogness 314b6cf8b3fSJohn Ogness /* Determine the data array index from a logical position. */ 315b6cf8b3fSJohn Ogness #define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring)) 316b6cf8b3fSJohn Ogness 317b6cf8b3fSJohn Ogness /* Determine the desc array index from an ID or sequence number. 
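 *
 * For example (assuming a descriptor ring with 2 ^ 15 entries, as in the
 * DOC usage example above), ID 3 and ID 3 + 32768 (the same slot one wrap
 * later) both map to array index 3.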
*/ 318b6cf8b3fSJohn Ogness #define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring)) 319b6cf8b3fSJohn Ogness 320b6cf8b3fSJohn Ogness /* Determine how many times the data array has wrapped. */ 321b6cf8b3fSJohn Ogness #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits) 322b6cf8b3fSJohn Ogness 323d397820fSJohn Ogness /* Determine if a logical position refers to a data-less block. */ 324d397820fSJohn Ogness #define LPOS_DATALESS(lpos) ((lpos) & 1UL) 325e3bc0401SJohn Ogness #define BLK_DATALESS(blk) (LPOS_DATALESS((blk)->begin) && \ 326e3bc0401SJohn Ogness LPOS_DATALESS((blk)->next)) 327d397820fSJohn Ogness 328b6cf8b3fSJohn Ogness /* Get the logical position at index 0 of the current wrap. */ 329b6cf8b3fSJohn Ogness #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \ 330b6cf8b3fSJohn Ogness ((lpos) & ~DATA_SIZE_MASK(data_ring)) 331b6cf8b3fSJohn Ogness 332b6cf8b3fSJohn Ogness /* Get the ID for the same index of the previous wrap as the given ID. */ 333b6cf8b3fSJohn Ogness #define DESC_ID_PREV_WRAP(desc_ring, id) \ 334b6cf8b3fSJohn Ogness DESC_ID((id) - DESCS_COUNT(desc_ring)) 335b6cf8b3fSJohn Ogness 336b6cf8b3fSJohn Ogness /* 337b6cf8b3fSJohn Ogness * A data block: mapped directly to the beginning of the data block area 338b6cf8b3fSJohn Ogness * specified as a logical position within the data ring. 339b6cf8b3fSJohn Ogness * 340b6cf8b3fSJohn Ogness * @id: the ID of the associated descriptor 341b6cf8b3fSJohn Ogness * @data: the writer data 342b6cf8b3fSJohn Ogness * 343b6cf8b3fSJohn Ogness * Note that the size of a data block is only known by its associated 344b6cf8b3fSJohn Ogness * descriptor. 345b6cf8b3fSJohn Ogness */ 346b6cf8b3fSJohn Ogness struct prb_data_block { 347b6cf8b3fSJohn Ogness unsigned long id; 348a38283daSGustavo A. R. Silva char data[]; 349b6cf8b3fSJohn Ogness }; 350b6cf8b3fSJohn Ogness 351b6cf8b3fSJohn Ogness /* 352b6cf8b3fSJohn Ogness * Return the descriptor associated with @n. @n can be either a 353b6cf8b3fSJohn Ogness * descriptor ID or a sequence number. 354b6cf8b3fSJohn Ogness */ 355b6cf8b3fSJohn Ogness static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n) 356b6cf8b3fSJohn Ogness { 357b6cf8b3fSJohn Ogness return &desc_ring->descs[DESC_INDEX(desc_ring, n)]; 358b6cf8b3fSJohn Ogness } 359b6cf8b3fSJohn Ogness 360cfe2790bSJohn Ogness /* 361cfe2790bSJohn Ogness * Return the printk_info associated with @n. @n can be either a 362cfe2790bSJohn Ogness * descriptor ID or a sequence number. 363cfe2790bSJohn Ogness */ 364cfe2790bSJohn Ogness static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n) 365cfe2790bSJohn Ogness { 366cfe2790bSJohn Ogness return &desc_ring->infos[DESC_INDEX(desc_ring, n)]; 367cfe2790bSJohn Ogness } 368cfe2790bSJohn Ogness 369b6cf8b3fSJohn Ogness static struct prb_data_block *to_block(struct prb_data_ring *data_ring, 370b6cf8b3fSJohn Ogness unsigned long begin_lpos) 371b6cf8b3fSJohn Ogness { 372b6cf8b3fSJohn Ogness return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)]; 373b6cf8b3fSJohn Ogness } 374b6cf8b3fSJohn Ogness 375b6cf8b3fSJohn Ogness /* 376b6cf8b3fSJohn Ogness * Increase the data size to account for data block meta data plus any 377b6cf8b3fSJohn Ogness * padding so that the adjacent data block is aligned on the ID size. 
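 *
 * For example (assuming a 64-bit system, where sizeof(unsigned long) is 8),
 * a 13 byte reservation becomes 13 + 8 = 21 bytes of block data and is
 * then padded up to 24 bytes so that the next block ID stays aligned.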
378b6cf8b3fSJohn Ogness */ 379b6cf8b3fSJohn Ogness static unsigned int to_blk_size(unsigned int size) 380b6cf8b3fSJohn Ogness { 381b6cf8b3fSJohn Ogness struct prb_data_block *db = NULL; 382b6cf8b3fSJohn Ogness 383b6cf8b3fSJohn Ogness size += sizeof(*db); 384b6cf8b3fSJohn Ogness size = ALIGN(size, sizeof(db->id)); 385b6cf8b3fSJohn Ogness return size; 386b6cf8b3fSJohn Ogness } 387b6cf8b3fSJohn Ogness 388b6cf8b3fSJohn Ogness /* 389b6cf8b3fSJohn Ogness * Sanity checker for reserve size. The ringbuffer code assumes that a data 390b6cf8b3fSJohn Ogness * block does not exceed the maximum possible size that could fit within the 391b6cf8b3fSJohn Ogness * ringbuffer. This function provides that basic size check so that the 392b6cf8b3fSJohn Ogness * assumption is safe. 393b6cf8b3fSJohn Ogness */ 394b6cf8b3fSJohn Ogness static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size) 395b6cf8b3fSJohn Ogness { 396b6cf8b3fSJohn Ogness struct prb_data_block *db = NULL; 397b6cf8b3fSJohn Ogness 398b6cf8b3fSJohn Ogness if (size == 0) 399d397820fSJohn Ogness return true; 400b6cf8b3fSJohn Ogness 401b6cf8b3fSJohn Ogness /* 402b6cf8b3fSJohn Ogness * Ensure the alignment padded size could possibly fit in the data 403b6cf8b3fSJohn Ogness * array. The largest possible data block must still leave room for 404b6cf8b3fSJohn Ogness * at least the ID of the next block. 405b6cf8b3fSJohn Ogness */ 406b6cf8b3fSJohn Ogness size = to_blk_size(size); 407b6cf8b3fSJohn Ogness if (size > DATA_SIZE(data_ring) - sizeof(db->id)) 408b6cf8b3fSJohn Ogness return false; 409b6cf8b3fSJohn Ogness 410b6cf8b3fSJohn Ogness return true; 411b6cf8b3fSJohn Ogness } 412b6cf8b3fSJohn Ogness 413b6cf8b3fSJohn Ogness /* Query the state of a descriptor. */ 414b6cf8b3fSJohn Ogness static enum desc_state get_desc_state(unsigned long id, 415b6cf8b3fSJohn Ogness unsigned long state_val) 416b6cf8b3fSJohn Ogness { 417b6cf8b3fSJohn Ogness if (id != DESC_ID(state_val)) 418b6cf8b3fSJohn Ogness return desc_miss; 419b6cf8b3fSJohn Ogness 42010dcb06dSJohn Ogness return DESC_STATE(state_val); 421b6cf8b3fSJohn Ogness } 422b6cf8b3fSJohn Ogness 423b6cf8b3fSJohn Ogness /* 424ce003d67SJohn Ogness * Get a copy of a specified descriptor and return its queried state. If the 425ce003d67SJohn Ogness * descriptor is in an inconsistent state (miss or reserved), the caller can 426ce003d67SJohn Ogness * only expect the descriptor's @state_var field to be valid. 427cfe2790bSJohn Ogness * 428cfe2790bSJohn Ogness * The sequence number and caller_id can be optionally retrieved. Like all 429cfe2790bSJohn Ogness * non-state_var data, they are only valid if the descriptor is in a 430cfe2790bSJohn Ogness * consistent state. 431b6cf8b3fSJohn Ogness */ 432b6cf8b3fSJohn Ogness static enum desc_state desc_read(struct prb_desc_ring *desc_ring, 433cfe2790bSJohn Ogness unsigned long id, struct prb_desc *desc_out, 434cfe2790bSJohn Ogness u64 *seq_out, u32 *caller_id_out) 435b6cf8b3fSJohn Ogness { 436cfe2790bSJohn Ogness struct printk_info *info = to_info(desc_ring, id); 437b6cf8b3fSJohn Ogness struct prb_desc *desc = to_desc(desc_ring, id); 438b6cf8b3fSJohn Ogness atomic_long_t *state_var = &desc->state_var; 439b6cf8b3fSJohn Ogness enum desc_state d_state; 440b6cf8b3fSJohn Ogness unsigned long state_val; 441b6cf8b3fSJohn Ogness 442b6cf8b3fSJohn Ogness /* Check the descriptor state. 
*/ 443b6cf8b3fSJohn Ogness state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */ 444b6cf8b3fSJohn Ogness d_state = get_desc_state(id, state_val); 445ce003d67SJohn Ogness if (d_state == desc_miss || d_state == desc_reserved) { 446ce003d67SJohn Ogness /* 447ce003d67SJohn Ogness * The descriptor is in an inconsistent state. Set at least 448ce003d67SJohn Ogness * @state_var so that the caller can see the details of 449ce003d67SJohn Ogness * the inconsistent state. 450ce003d67SJohn Ogness */ 451ce003d67SJohn Ogness goto out; 452ce003d67SJohn Ogness } 453b6cf8b3fSJohn Ogness 454b6cf8b3fSJohn Ogness /* 455b6cf8b3fSJohn Ogness * Guarantee the state is loaded before copying the descriptor 456b6cf8b3fSJohn Ogness * content. This avoids copying obsolete descriptor content that might 4574cfc7258SJohn Ogness * not apply to the descriptor state. This pairs with _prb_commit:B. 458b6cf8b3fSJohn Ogness * 459b6cf8b3fSJohn Ogness * Memory barrier involvement: 460b6cf8b3fSJohn Ogness * 4614cfc7258SJohn Ogness * If desc_read:A reads from _prb_commit:B, then desc_read:C reads 4624cfc7258SJohn Ogness * from _prb_commit:A. 463b6cf8b3fSJohn Ogness * 464b6cf8b3fSJohn Ogness * Relies on: 465b6cf8b3fSJohn Ogness * 4664cfc7258SJohn Ogness * WMB from _prb_commit:A to _prb_commit:B 467b6cf8b3fSJohn Ogness * matching 468b6cf8b3fSJohn Ogness * RMB from desc_read:A to desc_read:C 469b6cf8b3fSJohn Ogness */ 470b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(desc_read:B) */ 471b6cf8b3fSJohn Ogness 472b6cf8b3fSJohn Ogness /* 473b6cf8b3fSJohn Ogness * Copy the descriptor data. The data is not valid until the 474e7c1fe21SJohn Ogness * state has been re-checked. A memcpy() for all of @desc 475e7c1fe21SJohn Ogness * cannot be used because of the atomic_t @state_var field. 476b6cf8b3fSJohn Ogness */ 477*f244b4dcSPetr Mladek if (desc_out) { 478e7c1fe21SJohn Ogness memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos, 479cfe2790bSJohn Ogness sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */ 480*f244b4dcSPetr Mladek } 481cfe2790bSJohn Ogness if (seq_out) 482cfe2790bSJohn Ogness *seq_out = info->seq; /* also part of desc_read:C */ 483cfe2790bSJohn Ogness if (caller_id_out) 484cfe2790bSJohn Ogness *caller_id_out = info->caller_id; /* also part of desc_read:C */ 485b6cf8b3fSJohn Ogness 486b6cf8b3fSJohn Ogness /* 487b6cf8b3fSJohn Ogness * 1. Guarantee the descriptor content is loaded before re-checking 488b6cf8b3fSJohn Ogness * the state. This avoids reading an obsolete descriptor state 489b6cf8b3fSJohn Ogness * that may not apply to the copied content. This pairs with 490b6cf8b3fSJohn Ogness * desc_reserve:F. 491b6cf8b3fSJohn Ogness * 492b6cf8b3fSJohn Ogness * Memory barrier involvement: 493b6cf8b3fSJohn Ogness * 494b6cf8b3fSJohn Ogness * If desc_read:C reads from desc_reserve:G, then desc_read:E 495b6cf8b3fSJohn Ogness * reads from desc_reserve:F. 496b6cf8b3fSJohn Ogness * 497b6cf8b3fSJohn Ogness * Relies on: 498b6cf8b3fSJohn Ogness * 499b6cf8b3fSJohn Ogness * WMB from desc_reserve:F to desc_reserve:G 500b6cf8b3fSJohn Ogness * matching 501b6cf8b3fSJohn Ogness * RMB from desc_read:C to desc_read:E 502b6cf8b3fSJohn Ogness * 503b6cf8b3fSJohn Ogness * 2. Guarantee the record data is loaded before re-checking the 504b6cf8b3fSJohn Ogness * state. This avoids reading an obsolete descriptor state that may 5054cfc7258SJohn Ogness * not apply to the copied data. This pairs with data_alloc:A and 5064cfc7258SJohn Ogness * data_realloc:A. 
507b6cf8b3fSJohn Ogness * 508b6cf8b3fSJohn Ogness * Memory barrier involvement: 509b6cf8b3fSJohn Ogness * 510b6cf8b3fSJohn Ogness * If copy_data:A reads from data_alloc:B, then desc_read:E 511b6cf8b3fSJohn Ogness * reads from desc_make_reusable:A. 512b6cf8b3fSJohn Ogness * 513b6cf8b3fSJohn Ogness * Relies on: 514b6cf8b3fSJohn Ogness * 515b6cf8b3fSJohn Ogness * MB from desc_make_reusable:A to data_alloc:B 516b6cf8b3fSJohn Ogness * matching 517b6cf8b3fSJohn Ogness * RMB from desc_read:C to desc_read:E 518b6cf8b3fSJohn Ogness * 519b6cf8b3fSJohn Ogness * Note: desc_make_reusable:A and data_alloc:B can be different 520b6cf8b3fSJohn Ogness * CPUs. However, the data_alloc:B CPU (which performs the 521b6cf8b3fSJohn Ogness * full memory barrier) must have previously seen 522b6cf8b3fSJohn Ogness * desc_make_reusable:A. 523b6cf8b3fSJohn Ogness */ 524b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(desc_read:D) */ 525b6cf8b3fSJohn Ogness 526ce003d67SJohn Ogness /* 527ce003d67SJohn Ogness * The data has been copied. Return the current descriptor state, 528ce003d67SJohn Ogness * which may have changed since the load above. 529ce003d67SJohn Ogness */ 530b6cf8b3fSJohn Ogness state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */ 531ce003d67SJohn Ogness d_state = get_desc_state(id, state_val); 532ce003d67SJohn Ogness out: 533*f244b4dcSPetr Mladek if (desc_out) 534ce003d67SJohn Ogness atomic_long_set(&desc_out->state_var, state_val); 535ce003d67SJohn Ogness return d_state; 536b6cf8b3fSJohn Ogness } 537b6cf8b3fSJohn Ogness 538b6cf8b3fSJohn Ogness /* 5394cfc7258SJohn Ogness * Take a specified descriptor out of the finalized state by attempting 5404cfc7258SJohn Ogness * the transition from finalized to reusable. Either this context or some 541b6cf8b3fSJohn Ogness * other context will have been successful. 542b6cf8b3fSJohn Ogness */ 543b6cf8b3fSJohn Ogness static void desc_make_reusable(struct prb_desc_ring *desc_ring, 544b6cf8b3fSJohn Ogness unsigned long id) 545b6cf8b3fSJohn Ogness { 5464cfc7258SJohn Ogness unsigned long val_finalized = DESC_SV(id, desc_finalized); 54710dcb06dSJohn Ogness unsigned long val_reusable = DESC_SV(id, desc_reusable); 548b6cf8b3fSJohn Ogness struct prb_desc *desc = to_desc(desc_ring, id); 549b6cf8b3fSJohn Ogness atomic_long_t *state_var = &desc->state_var; 550b6cf8b3fSJohn Ogness 5514cfc7258SJohn Ogness atomic_long_cmpxchg_relaxed(state_var, val_finalized, 552b6cf8b3fSJohn Ogness val_reusable); /* LMM(desc_make_reusable:A) */ 553b6cf8b3fSJohn Ogness } 554b6cf8b3fSJohn Ogness 555b6cf8b3fSJohn Ogness /* 556f35efc78SJohn Ogness * Given the text data ring, put the associated descriptor of each 557b6cf8b3fSJohn Ogness * data block from @lpos_begin until @lpos_end into the reusable state. 558b6cf8b3fSJohn Ogness * 559b6cf8b3fSJohn Ogness * If there is any problem making the associated descriptor reusable, either 5604cfc7258SJohn Ogness * the descriptor has not yet been finalized or another writer context has 561b6cf8b3fSJohn Ogness * already pushed the tail lpos past the problematic data block. Regardless, 562b6cf8b3fSJohn Ogness * on error the caller can re-load the tail lpos to determine the situation. 
563b6cf8b3fSJohn Ogness */ 564b6cf8b3fSJohn Ogness static bool data_make_reusable(struct printk_ringbuffer *rb, 565b6cf8b3fSJohn Ogness unsigned long lpos_begin, 566b6cf8b3fSJohn Ogness unsigned long lpos_end, 567b6cf8b3fSJohn Ogness unsigned long *lpos_out) 568b6cf8b3fSJohn Ogness { 569584da076SNikolay Borisov 570584da076SNikolay Borisov struct prb_data_ring *data_ring = &rb->text_data_ring; 571b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 572b6cf8b3fSJohn Ogness struct prb_data_block *blk; 573b6cf8b3fSJohn Ogness enum desc_state d_state; 574b6cf8b3fSJohn Ogness struct prb_desc desc; 575f35efc78SJohn Ogness struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos; 576b6cf8b3fSJohn Ogness unsigned long id; 577b6cf8b3fSJohn Ogness 578b6cf8b3fSJohn Ogness /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */ 579b6cf8b3fSJohn Ogness while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) { 580b6cf8b3fSJohn Ogness blk = to_block(data_ring, lpos_begin); 581b6cf8b3fSJohn Ogness 582b6cf8b3fSJohn Ogness /* 583b6cf8b3fSJohn Ogness * Load the block ID from the data block. This is a data race 584b6cf8b3fSJohn Ogness * against a writer that may have newly reserved this data 585b6cf8b3fSJohn Ogness * area. If the loaded value matches a valid descriptor ID, 586b6cf8b3fSJohn Ogness * the blk_lpos of that descriptor will be checked to make 587b6cf8b3fSJohn Ogness * sure it points back to this data block. If the check fails, 588b6cf8b3fSJohn Ogness * the data area has been recycled by another writer. 589b6cf8b3fSJohn Ogness */ 590b6cf8b3fSJohn Ogness id = blk->id; /* LMM(data_make_reusable:A) */ 591b6cf8b3fSJohn Ogness 592cfe2790bSJohn Ogness d_state = desc_read(desc_ring, id, &desc, 593cfe2790bSJohn Ogness NULL, NULL); /* LMM(data_make_reusable:B) */ 594b6cf8b3fSJohn Ogness 595b6cf8b3fSJohn Ogness switch (d_state) { 596b6cf8b3fSJohn Ogness case desc_miss: 597b6cf8b3fSJohn Ogness case desc_reserved: 598b6cf8b3fSJohn Ogness case desc_committed: 5994cfc7258SJohn Ogness return false; 6004cfc7258SJohn Ogness case desc_finalized: 601b6cf8b3fSJohn Ogness /* 602b6cf8b3fSJohn Ogness * This data block is invalid if the descriptor 603b6cf8b3fSJohn Ogness * does not point back to it. 604b6cf8b3fSJohn Ogness */ 605b6cf8b3fSJohn Ogness if (blk_lpos->begin != lpos_begin) 606b6cf8b3fSJohn Ogness return false; 607b6cf8b3fSJohn Ogness desc_make_reusable(desc_ring, id); 608b6cf8b3fSJohn Ogness break; 609b6cf8b3fSJohn Ogness case desc_reusable: 610b6cf8b3fSJohn Ogness /* 611b6cf8b3fSJohn Ogness * This data block is invalid if the descriptor 612b6cf8b3fSJohn Ogness * does not point back to it. 613b6cf8b3fSJohn Ogness */ 614b6cf8b3fSJohn Ogness if (blk_lpos->begin != lpos_begin) 615b6cf8b3fSJohn Ogness return false; 616b6cf8b3fSJohn Ogness break; 617b6cf8b3fSJohn Ogness } 618b6cf8b3fSJohn Ogness 619b6cf8b3fSJohn Ogness /* Advance @lpos_begin to the next data block. */ 620b6cf8b3fSJohn Ogness lpos_begin = blk_lpos->next; 621b6cf8b3fSJohn Ogness } 622b6cf8b3fSJohn Ogness 623b6cf8b3fSJohn Ogness *lpos_out = lpos_begin; 624b6cf8b3fSJohn Ogness return true; 625b6cf8b3fSJohn Ogness } 626b6cf8b3fSJohn Ogness 627b6cf8b3fSJohn Ogness /* 628b6cf8b3fSJohn Ogness * Advance the data ring tail to at least @lpos. This function puts 629b6cf8b3fSJohn Ogness * descriptors into the reusable state if the tail is pushed beyond 630b6cf8b3fSJohn Ogness * their associated data block. 
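 *
 * Note that the loop condition below, ((lpos - tail_lpos) - 1 <
 * DATA_SIZE(data_ring)), relies on unsigned wrap-around: it only holds
 * while the tail is behind @lpos by at most one full data ring. As a
 * worked instance (assuming a hypothetical 1 KiB data ring), tail_lpos ==
 * 2000 with lpos == 2000 yields ULONG_MAX, so the loop is not entered,
 * whereas lpos == 2100 yields 99 and the tail is pushed.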
631b6cf8b3fSJohn Ogness */ 632584da076SNikolay Borisov static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos) 633b6cf8b3fSJohn Ogness { 634584da076SNikolay Borisov struct prb_data_ring *data_ring = &rb->text_data_ring; 635b6cf8b3fSJohn Ogness unsigned long tail_lpos_new; 636b6cf8b3fSJohn Ogness unsigned long tail_lpos; 637b6cf8b3fSJohn Ogness unsigned long next_lpos; 638b6cf8b3fSJohn Ogness 639d397820fSJohn Ogness /* If @lpos is from a data-less block, there is nothing to do. */ 640d397820fSJohn Ogness if (LPOS_DATALESS(lpos)) 641b6cf8b3fSJohn Ogness return true; 642b6cf8b3fSJohn Ogness 643b6cf8b3fSJohn Ogness /* 644b6cf8b3fSJohn Ogness * Any descriptor states that have transitioned to reusable due to the 645b6cf8b3fSJohn Ogness * data tail being pushed to this loaded value will be visible to this 646b6cf8b3fSJohn Ogness * CPU. This pairs with data_push_tail:D. 647b6cf8b3fSJohn Ogness * 648b6cf8b3fSJohn Ogness * Memory barrier involvement: 649b6cf8b3fSJohn Ogness * 650b6cf8b3fSJohn Ogness * If data_push_tail:A reads from data_push_tail:D, then this CPU can 651b6cf8b3fSJohn Ogness * see desc_make_reusable:A. 652b6cf8b3fSJohn Ogness * 653b6cf8b3fSJohn Ogness * Relies on: 654b6cf8b3fSJohn Ogness * 655b6cf8b3fSJohn Ogness * MB from desc_make_reusable:A to data_push_tail:D 656b6cf8b3fSJohn Ogness * matches 657b6cf8b3fSJohn Ogness * READFROM from data_push_tail:D to data_push_tail:A 658b6cf8b3fSJohn Ogness * thus 659b6cf8b3fSJohn Ogness * READFROM from desc_make_reusable:A to this CPU 660b6cf8b3fSJohn Ogness */ 661b6cf8b3fSJohn Ogness tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */ 662b6cf8b3fSJohn Ogness 663b6cf8b3fSJohn Ogness /* 664b6cf8b3fSJohn Ogness * Loop until the tail lpos is at or beyond @lpos. This condition 665b6cf8b3fSJohn Ogness * may already be satisfied, resulting in no full memory barrier 666b6cf8b3fSJohn Ogness * from data_push_tail:D being performed. However, since this CPU 667b6cf8b3fSJohn Ogness * sees the new tail lpos, any descriptor states that transitioned to 668b6cf8b3fSJohn Ogness * the reusable state must already be visible. 669b6cf8b3fSJohn Ogness */ 670b6cf8b3fSJohn Ogness while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) { 671b6cf8b3fSJohn Ogness /* 672b6cf8b3fSJohn Ogness * Make all descriptors reusable that are associated with 673b6cf8b3fSJohn Ogness * data blocks before @lpos. 674b6cf8b3fSJohn Ogness */ 675584da076SNikolay Borisov if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) { 676b6cf8b3fSJohn Ogness /* 677b6cf8b3fSJohn Ogness * 1. Guarantee the block ID loaded in 678b6cf8b3fSJohn Ogness * data_make_reusable() is performed before 679b6cf8b3fSJohn Ogness * reloading the tail lpos. The failed 680b6cf8b3fSJohn Ogness * data_make_reusable() may be due to a newly 681b6cf8b3fSJohn Ogness * recycled data area causing the tail lpos to 682b6cf8b3fSJohn Ogness * have been previously pushed. This pairs with 6834cfc7258SJohn Ogness * data_alloc:A and data_realloc:A. 684b6cf8b3fSJohn Ogness * 685b6cf8b3fSJohn Ogness * Memory barrier involvement: 686b6cf8b3fSJohn Ogness * 687b6cf8b3fSJohn Ogness * If data_make_reusable:A reads from data_alloc:B, 688b6cf8b3fSJohn Ogness * then data_push_tail:C reads from 689b6cf8b3fSJohn Ogness * data_push_tail:D. 
690b6cf8b3fSJohn Ogness * 691b6cf8b3fSJohn Ogness * Relies on: 692b6cf8b3fSJohn Ogness * 693b6cf8b3fSJohn Ogness * MB from data_push_tail:D to data_alloc:B 694b6cf8b3fSJohn Ogness * matching 695b6cf8b3fSJohn Ogness * RMB from data_make_reusable:A to 696b6cf8b3fSJohn Ogness * data_push_tail:C 697b6cf8b3fSJohn Ogness * 698b6cf8b3fSJohn Ogness * Note: data_push_tail:D and data_alloc:B can be 699b6cf8b3fSJohn Ogness * different CPUs. However, the data_alloc:B 700b6cf8b3fSJohn Ogness * CPU (which performs the full memory 701b6cf8b3fSJohn Ogness * barrier) must have previously seen 702b6cf8b3fSJohn Ogness * data_push_tail:D. 703b6cf8b3fSJohn Ogness * 704b6cf8b3fSJohn Ogness * 2. Guarantee the descriptor state loaded in 705b6cf8b3fSJohn Ogness * data_make_reusable() is performed before 706b6cf8b3fSJohn Ogness * reloading the tail lpos. The failed 707b6cf8b3fSJohn Ogness * data_make_reusable() may be due to a newly 708b6cf8b3fSJohn Ogness * recycled descriptor causing the tail lpos to 709b6cf8b3fSJohn Ogness * have been previously pushed. This pairs with 710b6cf8b3fSJohn Ogness * desc_reserve:D. 711b6cf8b3fSJohn Ogness * 712b6cf8b3fSJohn Ogness * Memory barrier involvement: 713b6cf8b3fSJohn Ogness * 714b6cf8b3fSJohn Ogness * If data_make_reusable:B reads from 715b6cf8b3fSJohn Ogness * desc_reserve:F, then data_push_tail:C reads 716b6cf8b3fSJohn Ogness * from data_push_tail:D. 717b6cf8b3fSJohn Ogness * 718b6cf8b3fSJohn Ogness * Relies on: 719b6cf8b3fSJohn Ogness * 720b6cf8b3fSJohn Ogness * MB from data_push_tail:D to desc_reserve:F 721b6cf8b3fSJohn Ogness * matching 722b6cf8b3fSJohn Ogness * RMB from data_make_reusable:B to 723b6cf8b3fSJohn Ogness * data_push_tail:C 724b6cf8b3fSJohn Ogness * 725b6cf8b3fSJohn Ogness * Note: data_push_tail:D and desc_reserve:F can 726b6cf8b3fSJohn Ogness * be different CPUs. However, the 727b6cf8b3fSJohn Ogness * desc_reserve:F CPU (which performs the 728b6cf8b3fSJohn Ogness * full memory barrier) must have previously 729b6cf8b3fSJohn Ogness * seen data_push_tail:D. 730b6cf8b3fSJohn Ogness */ 731b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(data_push_tail:B) */ 732b6cf8b3fSJohn Ogness 733b6cf8b3fSJohn Ogness tail_lpos_new = atomic_long_read(&data_ring->tail_lpos 734b6cf8b3fSJohn Ogness ); /* LMM(data_push_tail:C) */ 735b6cf8b3fSJohn Ogness if (tail_lpos_new == tail_lpos) 736b6cf8b3fSJohn Ogness return false; 737b6cf8b3fSJohn Ogness 738b6cf8b3fSJohn Ogness /* Another CPU pushed the tail. Try again. */ 739b6cf8b3fSJohn Ogness tail_lpos = tail_lpos_new; 740b6cf8b3fSJohn Ogness continue; 741b6cf8b3fSJohn Ogness } 742b6cf8b3fSJohn Ogness 743b6cf8b3fSJohn Ogness /* 744b6cf8b3fSJohn Ogness * Guarantee any descriptor states that have transitioned to 745b6cf8b3fSJohn Ogness * reusable are stored before pushing the tail lpos. A full 746b6cf8b3fSJohn Ogness * memory barrier is needed since other CPUs may have made 747b6cf8b3fSJohn Ogness * the descriptor states reusable. This pairs with 748b6cf8b3fSJohn Ogness * data_push_tail:A. 749b6cf8b3fSJohn Ogness */ 750b6cf8b3fSJohn Ogness if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos, 751b6cf8b3fSJohn Ogness next_lpos)) { /* LMM(data_push_tail:D) */ 752b6cf8b3fSJohn Ogness break; 753b6cf8b3fSJohn Ogness } 754b6cf8b3fSJohn Ogness } 755b6cf8b3fSJohn Ogness 756b6cf8b3fSJohn Ogness return true; 757b6cf8b3fSJohn Ogness } 758b6cf8b3fSJohn Ogness 759b6cf8b3fSJohn Ogness /* 760b6cf8b3fSJohn Ogness * Advance the desc ring tail. 
This function advances the tail by one 761b6cf8b3fSJohn Ogness * descriptor, thus invalidating the oldest descriptor. Before advancing 762b6cf8b3fSJohn Ogness * the tail, the tail descriptor is made reusable and all data blocks up to 763b6cf8b3fSJohn Ogness * and including the descriptor's data block are invalidated (i.e. the data 764b6cf8b3fSJohn Ogness * ring tail is pushed past the data block of the descriptor being made 765b6cf8b3fSJohn Ogness * reusable). 766b6cf8b3fSJohn Ogness */ 767b6cf8b3fSJohn Ogness static bool desc_push_tail(struct printk_ringbuffer *rb, 768b6cf8b3fSJohn Ogness unsigned long tail_id) 769b6cf8b3fSJohn Ogness { 770b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 771b6cf8b3fSJohn Ogness enum desc_state d_state; 772b6cf8b3fSJohn Ogness struct prb_desc desc; 773b6cf8b3fSJohn Ogness 774cfe2790bSJohn Ogness d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL); 775b6cf8b3fSJohn Ogness 776b6cf8b3fSJohn Ogness switch (d_state) { 777b6cf8b3fSJohn Ogness case desc_miss: 778b6cf8b3fSJohn Ogness /* 779b6cf8b3fSJohn Ogness * If the ID is exactly 1 wrap behind the expected, it is 780b6cf8b3fSJohn Ogness * in the process of being reserved by another writer and 781b6cf8b3fSJohn Ogness * must be considered reserved. 782b6cf8b3fSJohn Ogness */ 783b6cf8b3fSJohn Ogness if (DESC_ID(atomic_long_read(&desc.state_var)) == 784b6cf8b3fSJohn Ogness DESC_ID_PREV_WRAP(desc_ring, tail_id)) { 785b6cf8b3fSJohn Ogness return false; 786b6cf8b3fSJohn Ogness } 787b6cf8b3fSJohn Ogness 788b6cf8b3fSJohn Ogness /* 789b6cf8b3fSJohn Ogness * The ID has changed. Another writer must have pushed the 790b6cf8b3fSJohn Ogness * tail and recycled the descriptor already. Success is 791b6cf8b3fSJohn Ogness * returned because the caller is only interested in the 792b6cf8b3fSJohn Ogness * specified tail being pushed, which it was. 793b6cf8b3fSJohn Ogness */ 794b6cf8b3fSJohn Ogness return true; 795b6cf8b3fSJohn Ogness case desc_reserved: 796b6cf8b3fSJohn Ogness case desc_committed: 7974cfc7258SJohn Ogness return false; 7984cfc7258SJohn Ogness case desc_finalized: 799b6cf8b3fSJohn Ogness desc_make_reusable(desc_ring, tail_id); 800b6cf8b3fSJohn Ogness break; 801b6cf8b3fSJohn Ogness case desc_reusable: 802b6cf8b3fSJohn Ogness break; 803b6cf8b3fSJohn Ogness } 804b6cf8b3fSJohn Ogness 805b6cf8b3fSJohn Ogness /* 806b6cf8b3fSJohn Ogness * Data blocks must be invalidated before their associated 807b6cf8b3fSJohn Ogness * descriptor can be made available for recycling. Invalidating 808b6cf8b3fSJohn Ogness * them later is not possible because there is no way to trust 809b6cf8b3fSJohn Ogness * data blocks once their associated descriptor is gone. 810b6cf8b3fSJohn Ogness */ 811b6cf8b3fSJohn Ogness 812584da076SNikolay Borisov if (!data_push_tail(rb, desc.text_blk_lpos.next)) 813b6cf8b3fSJohn Ogness return false; 814b6cf8b3fSJohn Ogness 815b6cf8b3fSJohn Ogness /* 816b6cf8b3fSJohn Ogness * Check the next descriptor after @tail_id before pushing the tail 8174cfc7258SJohn Ogness * to it because the tail must always be in a finalized or reusable 818b6cf8b3fSJohn Ogness * state. The implementation of prb_first_seq() relies on this. 819b6cf8b3fSJohn Ogness * 820b6cf8b3fSJohn Ogness * A successful read implies that the next descriptor is less than or 821b6cf8b3fSJohn Ogness * equal to @head_id so there is no risk of pushing the tail past the 822b6cf8b3fSJohn Ogness * head. 
823b6cf8b3fSJohn Ogness */ 824cfe2790bSJohn Ogness d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc, 825cfe2790bSJohn Ogness NULL, NULL); /* LMM(desc_push_tail:A) */ 826b6cf8b3fSJohn Ogness 8274cfc7258SJohn Ogness if (d_state == desc_finalized || d_state == desc_reusable) { 828b6cf8b3fSJohn Ogness /* 829b6cf8b3fSJohn Ogness * Guarantee any descriptor states that have transitioned to 830b6cf8b3fSJohn Ogness * reusable are stored before pushing the tail ID. This allows 831b6cf8b3fSJohn Ogness * verifying the recycled descriptor state. A full memory 832b6cf8b3fSJohn Ogness * barrier is needed since other CPUs may have made the 833b6cf8b3fSJohn Ogness * descriptor states reusable. This pairs with desc_reserve:D. 834b6cf8b3fSJohn Ogness */ 835b6cf8b3fSJohn Ogness atomic_long_cmpxchg(&desc_ring->tail_id, tail_id, 836b6cf8b3fSJohn Ogness DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */ 837b6cf8b3fSJohn Ogness } else { 838b6cf8b3fSJohn Ogness /* 839b6cf8b3fSJohn Ogness * Guarantee the last state load from desc_read() is before 840b6cf8b3fSJohn Ogness * reloading @tail_id in order to see a new tail ID in the 841b6cf8b3fSJohn Ogness * case that the descriptor has been recycled. This pairs 842b6cf8b3fSJohn Ogness * with desc_reserve:D. 843b6cf8b3fSJohn Ogness * 844b6cf8b3fSJohn Ogness * Memory barrier involvement: 845b6cf8b3fSJohn Ogness * 846b6cf8b3fSJohn Ogness * If desc_push_tail:A reads from desc_reserve:F, then 847b6cf8b3fSJohn Ogness * desc_push_tail:D reads from desc_push_tail:B. 848b6cf8b3fSJohn Ogness * 849b6cf8b3fSJohn Ogness * Relies on: 850b6cf8b3fSJohn Ogness * 851b6cf8b3fSJohn Ogness * MB from desc_push_tail:B to desc_reserve:F 852b6cf8b3fSJohn Ogness * matching 853b6cf8b3fSJohn Ogness * RMB from desc_push_tail:A to desc_push_tail:D 854b6cf8b3fSJohn Ogness * 855b6cf8b3fSJohn Ogness * Note: desc_push_tail:B and desc_reserve:F can be different 856b6cf8b3fSJohn Ogness * CPUs. However, the desc_reserve:F CPU (which performs 857b6cf8b3fSJohn Ogness * the full memory barrier) must have previously seen 858b6cf8b3fSJohn Ogness * desc_push_tail:B. 859b6cf8b3fSJohn Ogness */ 860b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(desc_push_tail:C) */ 861b6cf8b3fSJohn Ogness 862b6cf8b3fSJohn Ogness /* 863b6cf8b3fSJohn Ogness * Re-check the tail ID. The descriptor following @tail_id is 864b6cf8b3fSJohn Ogness * not in an allowed tail state. But if the tail has since 865b6cf8b3fSJohn Ogness * been moved by another CPU, then it does not matter. 866b6cf8b3fSJohn Ogness */ 867b6cf8b3fSJohn Ogness if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */ 868b6cf8b3fSJohn Ogness return false; 869b6cf8b3fSJohn Ogness } 870b6cf8b3fSJohn Ogness 871b6cf8b3fSJohn Ogness return true; 872b6cf8b3fSJohn Ogness } 873b6cf8b3fSJohn Ogness 874b6cf8b3fSJohn Ogness /* Reserve a new descriptor, invalidating the oldest if necessary. 
*/ 875b6cf8b3fSJohn Ogness static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out) 876b6cf8b3fSJohn Ogness { 877b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 878b6cf8b3fSJohn Ogness unsigned long prev_state_val; 879b6cf8b3fSJohn Ogness unsigned long id_prev_wrap; 880b6cf8b3fSJohn Ogness struct prb_desc *desc; 881b6cf8b3fSJohn Ogness unsigned long head_id; 882b6cf8b3fSJohn Ogness unsigned long id; 883b6cf8b3fSJohn Ogness 884b6cf8b3fSJohn Ogness head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */ 885b6cf8b3fSJohn Ogness 886b6cf8b3fSJohn Ogness do { 887b6cf8b3fSJohn Ogness id = DESC_ID(head_id + 1); 888b6cf8b3fSJohn Ogness id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id); 889b6cf8b3fSJohn Ogness 890b6cf8b3fSJohn Ogness /* 891b6cf8b3fSJohn Ogness * Guarantee the head ID is read before reading the tail ID. 892b6cf8b3fSJohn Ogness * Since the tail ID is updated before the head ID, this 893b6cf8b3fSJohn Ogness * guarantees that @id_prev_wrap is never ahead of the tail 894b6cf8b3fSJohn Ogness * ID. This pairs with desc_reserve:D. 895b6cf8b3fSJohn Ogness * 896b6cf8b3fSJohn Ogness * Memory barrier involvement: 897b6cf8b3fSJohn Ogness * 898b6cf8b3fSJohn Ogness * If desc_reserve:A reads from desc_reserve:D, then 899b6cf8b3fSJohn Ogness * desc_reserve:C reads from desc_push_tail:B. 900b6cf8b3fSJohn Ogness * 901b6cf8b3fSJohn Ogness * Relies on: 902b6cf8b3fSJohn Ogness * 903b6cf8b3fSJohn Ogness * MB from desc_push_tail:B to desc_reserve:D 904b6cf8b3fSJohn Ogness * matching 905b6cf8b3fSJohn Ogness * RMB from desc_reserve:A to desc_reserve:C 906b6cf8b3fSJohn Ogness * 907b6cf8b3fSJohn Ogness * Note: desc_push_tail:B and desc_reserve:D can be different 908b6cf8b3fSJohn Ogness * CPUs. However, the desc_reserve:D CPU (which performs 909b6cf8b3fSJohn Ogness * the full memory barrier) must have previously seen 910b6cf8b3fSJohn Ogness * desc_push_tail:B. 911b6cf8b3fSJohn Ogness */ 912b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(desc_reserve:B) */ 913b6cf8b3fSJohn Ogness 914b6cf8b3fSJohn Ogness if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id 915b6cf8b3fSJohn Ogness )) { /* LMM(desc_reserve:C) */ 916b6cf8b3fSJohn Ogness /* 917b6cf8b3fSJohn Ogness * Make space for the new descriptor by 918b6cf8b3fSJohn Ogness * advancing the tail. 919b6cf8b3fSJohn Ogness */ 920b6cf8b3fSJohn Ogness if (!desc_push_tail(rb, id_prev_wrap)) 921b6cf8b3fSJohn Ogness return false; 922b6cf8b3fSJohn Ogness } 923b6cf8b3fSJohn Ogness 924b6cf8b3fSJohn Ogness /* 925b6cf8b3fSJohn Ogness * 1. Guarantee the tail ID is read before validating the 926b6cf8b3fSJohn Ogness * recycled descriptor state. A read memory barrier is 927b6cf8b3fSJohn Ogness * sufficient for this. This pairs with desc_push_tail:B. 928b6cf8b3fSJohn Ogness * 929b6cf8b3fSJohn Ogness * Memory barrier involvement: 930b6cf8b3fSJohn Ogness * 931b6cf8b3fSJohn Ogness * If desc_reserve:C reads from desc_push_tail:B, then 932b6cf8b3fSJohn Ogness * desc_reserve:E reads from desc_make_reusable:A. 933b6cf8b3fSJohn Ogness * 934b6cf8b3fSJohn Ogness * Relies on: 935b6cf8b3fSJohn Ogness * 936b6cf8b3fSJohn Ogness * MB from desc_make_reusable:A to desc_push_tail:B 937b6cf8b3fSJohn Ogness * matching 938b6cf8b3fSJohn Ogness * RMB from desc_reserve:C to desc_reserve:E 939b6cf8b3fSJohn Ogness * 940b6cf8b3fSJohn Ogness * Note: desc_make_reusable:A and desc_push_tail:B can be 941b6cf8b3fSJohn Ogness * different CPUs. 
However, the desc_push_tail:B CPU 942b6cf8b3fSJohn Ogness * (which performs the full memory barrier) must have 943b6cf8b3fSJohn Ogness * previously seen desc_make_reusable:A. 944b6cf8b3fSJohn Ogness * 945b6cf8b3fSJohn Ogness * 2. Guarantee the tail ID is stored before storing the head 946b6cf8b3fSJohn Ogness * ID. This pairs with desc_reserve:B. 947b6cf8b3fSJohn Ogness * 948b6cf8b3fSJohn Ogness * 3. Guarantee any data ring tail changes are stored before 949b6cf8b3fSJohn Ogness * recycling the descriptor. Data ring tail changes can 950b6cf8b3fSJohn Ogness * happen via desc_push_tail()->data_push_tail(). A full 951b6cf8b3fSJohn Ogness * memory barrier is needed since another CPU may have 952b6cf8b3fSJohn Ogness * pushed the data ring tails. This pairs with 953b6cf8b3fSJohn Ogness * data_push_tail:B. 954b6cf8b3fSJohn Ogness * 955b6cf8b3fSJohn Ogness * 4. Guarantee a new tail ID is stored before recycling the 956b6cf8b3fSJohn Ogness * descriptor. A full memory barrier is needed since 957b6cf8b3fSJohn Ogness * another CPU may have pushed the tail ID. This pairs 958b6cf8b3fSJohn Ogness * with desc_push_tail:C and this also pairs with 959b6cf8b3fSJohn Ogness * prb_first_seq:C. 9604cfc7258SJohn Ogness * 9614cfc7258SJohn Ogness * 5. Guarantee the head ID is stored before trying to 9624cfc7258SJohn Ogness * finalize the previous descriptor. This pairs with 9634cfc7258SJohn Ogness * _prb_commit:B. 964b6cf8b3fSJohn Ogness */ 965b6cf8b3fSJohn Ogness } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id, 966b6cf8b3fSJohn Ogness id)); /* LMM(desc_reserve:D) */ 967b6cf8b3fSJohn Ogness 968b6cf8b3fSJohn Ogness desc = to_desc(desc_ring, id); 969b6cf8b3fSJohn Ogness 970b6cf8b3fSJohn Ogness /* 971b6cf8b3fSJohn Ogness * If the descriptor has been recycled, verify the old state val. 972b6cf8b3fSJohn Ogness * See "ABA Issues" about why this verification is performed. 973b6cf8b3fSJohn Ogness */ 974b6cf8b3fSJohn Ogness prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */ 975b6cf8b3fSJohn Ogness if (prev_state_val && 97610dcb06dSJohn Ogness get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) { 977b6cf8b3fSJohn Ogness WARN_ON_ONCE(1); 978b6cf8b3fSJohn Ogness return false; 979b6cf8b3fSJohn Ogness } 980b6cf8b3fSJohn Ogness 981b6cf8b3fSJohn Ogness /* 982b6cf8b3fSJohn Ogness * Assign the descriptor a new ID and set its state to reserved. 983b6cf8b3fSJohn Ogness * See "ABA Issues" about why cmpxchg() instead of set() is used. 984b6cf8b3fSJohn Ogness * 985b6cf8b3fSJohn Ogness * Guarantee the new descriptor ID and state is stored before making 986b6cf8b3fSJohn Ogness * any other changes. A write memory barrier is sufficient for this. 987b6cf8b3fSJohn Ogness * This pairs with desc_read:D. 988b6cf8b3fSJohn Ogness */ 989b6cf8b3fSJohn Ogness if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val, 99010dcb06dSJohn Ogness DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */ 991b6cf8b3fSJohn Ogness WARN_ON_ONCE(1); 992b6cf8b3fSJohn Ogness return false; 993b6cf8b3fSJohn Ogness } 994b6cf8b3fSJohn Ogness 995b6cf8b3fSJohn Ogness /* Now data in @desc can be modified: LMM(desc_reserve:G) */ 996b6cf8b3fSJohn Ogness 997b6cf8b3fSJohn Ogness *id_out = id; 998b6cf8b3fSJohn Ogness return true; 999b6cf8b3fSJohn Ogness } 1000b6cf8b3fSJohn Ogness 1001b6cf8b3fSJohn Ogness /* Determine the end of a data block. 
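 *
 * If the block would wrap, its data is stored at the beginning of the next
 * wrap, so the returned lpos lies past that point. As an illustrative case
 * (assuming a hypothetical 1 KiB data ring), a 100 byte block starting at
 * lpos 1000 would yield a next lpos of 1124 (wrap start 1024 plus 100 bytes).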
*/ 1002b6cf8b3fSJohn Ogness static unsigned long get_next_lpos(struct prb_data_ring *data_ring, 1003b6cf8b3fSJohn Ogness unsigned long lpos, unsigned int size) 1004b6cf8b3fSJohn Ogness { 1005b6cf8b3fSJohn Ogness unsigned long begin_lpos; 1006b6cf8b3fSJohn Ogness unsigned long next_lpos; 1007b6cf8b3fSJohn Ogness 1008b6cf8b3fSJohn Ogness begin_lpos = lpos; 1009b6cf8b3fSJohn Ogness next_lpos = lpos + size; 1010b6cf8b3fSJohn Ogness 1011b6cf8b3fSJohn Ogness /* First check if the data block does not wrap. */ 1012b6cf8b3fSJohn Ogness if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos)) 1013b6cf8b3fSJohn Ogness return next_lpos; 1014b6cf8b3fSJohn Ogness 1015b6cf8b3fSJohn Ogness /* Wrapping data blocks store their data at the beginning. */ 1016b6cf8b3fSJohn Ogness return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size); 1017b6cf8b3fSJohn Ogness } 1018b6cf8b3fSJohn Ogness 1019b6cf8b3fSJohn Ogness /* 1020b6cf8b3fSJohn Ogness * Allocate a new data block, invalidating the oldest data block(s) 1021b6cf8b3fSJohn Ogness * if necessary. This function also associates the data block with 1022b6cf8b3fSJohn Ogness * a specified descriptor. 1023b6cf8b3fSJohn Ogness */ 1024584da076SNikolay Borisov static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size, 1025b6cf8b3fSJohn Ogness struct prb_data_blk_lpos *blk_lpos, unsigned long id) 1026b6cf8b3fSJohn Ogness { 1027584da076SNikolay Borisov struct prb_data_ring *data_ring = &rb->text_data_ring; 1028b6cf8b3fSJohn Ogness struct prb_data_block *blk; 1029b6cf8b3fSJohn Ogness unsigned long begin_lpos; 1030b6cf8b3fSJohn Ogness unsigned long next_lpos; 1031b6cf8b3fSJohn Ogness 1032b6cf8b3fSJohn Ogness if (size == 0) { 1033b6cf8b3fSJohn Ogness /* Specify a data-less block. */ 1034d397820fSJohn Ogness blk_lpos->begin = NO_LPOS; 1035d397820fSJohn Ogness blk_lpos->next = NO_LPOS; 1036b6cf8b3fSJohn Ogness return NULL; 1037b6cf8b3fSJohn Ogness } 1038b6cf8b3fSJohn Ogness 1039b6cf8b3fSJohn Ogness size = to_blk_size(size); 1040b6cf8b3fSJohn Ogness 1041b6cf8b3fSJohn Ogness begin_lpos = atomic_long_read(&data_ring->head_lpos); 1042b6cf8b3fSJohn Ogness 1043b6cf8b3fSJohn Ogness do { 1044b6cf8b3fSJohn Ogness next_lpos = get_next_lpos(data_ring, begin_lpos, size); 1045b6cf8b3fSJohn Ogness 1046584da076SNikolay Borisov if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) { 1047b6cf8b3fSJohn Ogness /* Failed to allocate, specify a data-less block. */ 1048d397820fSJohn Ogness blk_lpos->begin = FAILED_LPOS; 1049d397820fSJohn Ogness blk_lpos->next = FAILED_LPOS; 1050b6cf8b3fSJohn Ogness return NULL; 1051b6cf8b3fSJohn Ogness } 1052b6cf8b3fSJohn Ogness 1053b6cf8b3fSJohn Ogness /* 1054b6cf8b3fSJohn Ogness * 1. Guarantee any descriptor states that have transitioned 1055b6cf8b3fSJohn Ogness * to reusable are stored before modifying the newly 1056b6cf8b3fSJohn Ogness * allocated data area. A full memory barrier is needed 1057b6cf8b3fSJohn Ogness * since other CPUs may have made the descriptor states 1058b6cf8b3fSJohn Ogness * reusable. See data_push_tail:A about why the reusable 1059b6cf8b3fSJohn Ogness * states are visible. This pairs with desc_read:D. 1060b6cf8b3fSJohn Ogness * 1061b6cf8b3fSJohn Ogness * 2. Guarantee any updated tail lpos is stored before 1062b6cf8b3fSJohn Ogness * modifying the newly allocated data area. Another CPU may 1063b6cf8b3fSJohn Ogness * be in data_make_reusable() and is reading a block ID 1064b6cf8b3fSJohn Ogness * from this area. 
data_make_reusable() can handle reading
1065b6cf8b3fSJohn Ogness * a garbage block ID value, but then it must be able to
1066b6cf8b3fSJohn Ogness * load a new tail lpos. A full memory barrier is needed
1067b6cf8b3fSJohn Ogness * since other CPUs may have updated the tail lpos. This
1068b6cf8b3fSJohn Ogness * pairs with data_push_tail:B.
1069b6cf8b3fSJohn Ogness */
1070b6cf8b3fSJohn Ogness } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
1071b6cf8b3fSJohn Ogness next_lpos)); /* LMM(data_alloc:A) */
1072b6cf8b3fSJohn Ogness
1073b6cf8b3fSJohn Ogness blk = to_block(data_ring, begin_lpos);
1074b6cf8b3fSJohn Ogness blk->id = id; /* LMM(data_alloc:B) */
1075b6cf8b3fSJohn Ogness
1076b6cf8b3fSJohn Ogness if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
1077b6cf8b3fSJohn Ogness /* Wrapping data blocks store their data at the beginning. */
1078b6cf8b3fSJohn Ogness blk = to_block(data_ring, 0);
1079b6cf8b3fSJohn Ogness
1080b6cf8b3fSJohn Ogness /*
1081b6cf8b3fSJohn Ogness * Store the ID on the wrapped block for consistency.
1082b6cf8b3fSJohn Ogness * The printk_ringbuffer does not actually use it.
1083b6cf8b3fSJohn Ogness */
1084b6cf8b3fSJohn Ogness blk->id = id;
1085b6cf8b3fSJohn Ogness }
1086b6cf8b3fSJohn Ogness
1087b6cf8b3fSJohn Ogness blk_lpos->begin = begin_lpos;
1088b6cf8b3fSJohn Ogness blk_lpos->next = next_lpos;
1089b6cf8b3fSJohn Ogness
1090b6cf8b3fSJohn Ogness return &blk->data[0];
1091b6cf8b3fSJohn Ogness }
1092b6cf8b3fSJohn Ogness
10934cfc7258SJohn Ogness /*
10944cfc7258SJohn Ogness * Try to resize an existing data block associated with the descriptor
10954cfc7258SJohn Ogness * specified by @id. If the resized data block should become wrapped, it
10964cfc7258SJohn Ogness * copies the old data to the new data block. If @size yields a data block
10974cfc7258SJohn Ogness * with the same or a smaller size, the data block is left as is.
10984cfc7258SJohn Ogness *
10994cfc7258SJohn Ogness * Fail if this is not the last allocated data block or if there is not
11004cfc7258SJohn Ogness * enough space or it is not possible to make enough space.
11014cfc7258SJohn Ogness *
11024cfc7258SJohn Ogness * Return a pointer to the beginning of the entire data buffer or NULL on
11034cfc7258SJohn Ogness * failure.
11044cfc7258SJohn Ogness */
1105584da076SNikolay Borisov static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
11064cfc7258SJohn Ogness struct prb_data_blk_lpos *blk_lpos, unsigned long id)
11074cfc7258SJohn Ogness {
1108584da076SNikolay Borisov struct prb_data_ring *data_ring = &rb->text_data_ring;
11094cfc7258SJohn Ogness struct prb_data_block *blk;
11104cfc7258SJohn Ogness unsigned long head_lpos;
11114cfc7258SJohn Ogness unsigned long next_lpos;
11124cfc7258SJohn Ogness bool wrapped;
11134cfc7258SJohn Ogness
11144cfc7258SJohn Ogness /* Reallocation only works if @blk_lpos is the newest data block. */
11154cfc7258SJohn Ogness head_lpos = atomic_long_read(&data_ring->head_lpos);
11164cfc7258SJohn Ogness if (head_lpos != blk_lpos->next)
11174cfc7258SJohn Ogness return NULL;
11184cfc7258SJohn Ogness
11194cfc7258SJohn Ogness /* Keep track of whether @blk_lpos was a wrapping data block.
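 * A wrapping data block is one whose begin and next lpos fall in
 * different wraps of the data ring; such a block keeps its writer data
 * at the beginning of the newer wrap.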
*/ 11204cfc7258SJohn Ogness wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next)); 11214cfc7258SJohn Ogness 11224cfc7258SJohn Ogness size = to_blk_size(size); 11234cfc7258SJohn Ogness 11244cfc7258SJohn Ogness next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size); 11254cfc7258SJohn Ogness 11264cfc7258SJohn Ogness /* If the data block does not increase, there is nothing to do. */ 11274cfc7258SJohn Ogness if (head_lpos - next_lpos < DATA_SIZE(data_ring)) { 1128eac48eb6SPetr Mladek if (wrapped) 1129eac48eb6SPetr Mladek blk = to_block(data_ring, 0); 1130eac48eb6SPetr Mladek else 11314cfc7258SJohn Ogness blk = to_block(data_ring, blk_lpos->begin); 11324cfc7258SJohn Ogness return &blk->data[0]; 11334cfc7258SJohn Ogness } 11344cfc7258SJohn Ogness 1135584da076SNikolay Borisov if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) 11364cfc7258SJohn Ogness return NULL; 11374cfc7258SJohn Ogness 11384cfc7258SJohn Ogness /* The memory barrier involvement is the same as data_alloc:A. */ 11394cfc7258SJohn Ogness if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos, 11404cfc7258SJohn Ogness next_lpos)) { /* LMM(data_realloc:A) */ 11414cfc7258SJohn Ogness return NULL; 11424cfc7258SJohn Ogness } 11434cfc7258SJohn Ogness 11444cfc7258SJohn Ogness blk = to_block(data_ring, blk_lpos->begin); 11454cfc7258SJohn Ogness 11464cfc7258SJohn Ogness if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) { 11474cfc7258SJohn Ogness struct prb_data_block *old_blk = blk; 11484cfc7258SJohn Ogness 11494cfc7258SJohn Ogness /* Wrapping data blocks store their data at the beginning. */ 11504cfc7258SJohn Ogness blk = to_block(data_ring, 0); 11514cfc7258SJohn Ogness 11524cfc7258SJohn Ogness /* 11534cfc7258SJohn Ogness * Store the ID on the wrapped block for consistency. 11544cfc7258SJohn Ogness * The printk_ringbuffer does not actually use it. 11554cfc7258SJohn Ogness */ 11564cfc7258SJohn Ogness blk->id = id; 11574cfc7258SJohn Ogness 11584cfc7258SJohn Ogness if (!wrapped) { 11594cfc7258SJohn Ogness /* 11604cfc7258SJohn Ogness * Since the allocated space is now in the newly 11614cfc7258SJohn Ogness * created wrapping data block, copy the content 11624cfc7258SJohn Ogness * from the old data block. 11634cfc7258SJohn Ogness */ 11644cfc7258SJohn Ogness memcpy(&blk->data[0], &old_blk->data[0], 11654cfc7258SJohn Ogness (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id)); 11664cfc7258SJohn Ogness } 11674cfc7258SJohn Ogness } 11684cfc7258SJohn Ogness 11694cfc7258SJohn Ogness blk_lpos->next = next_lpos; 11704cfc7258SJohn Ogness 11714cfc7258SJohn Ogness return &blk->data[0]; 11724cfc7258SJohn Ogness } 11734cfc7258SJohn Ogness 1174b6cf8b3fSJohn Ogness /* Return the number of bytes used by a data block. */ 1175b6cf8b3fSJohn Ogness static unsigned int space_used(struct prb_data_ring *data_ring, 1176b6cf8b3fSJohn Ogness struct prb_data_blk_lpos *blk_lpos) 1177b6cf8b3fSJohn Ogness { 1178d397820fSJohn Ogness /* Data-less blocks take no space. */ 1179e3bc0401SJohn Ogness if (BLK_DATALESS(blk_lpos)) 1180d397820fSJohn Ogness return 0; 1181d397820fSJohn Ogness 1182b6cf8b3fSJohn Ogness if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { 1183b6cf8b3fSJohn Ogness /* Data block does not wrap. 
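 * The used space is then simply the difference of the two indexes.
 * For example (illustrative values): begin index 128 and next index 192
 * means 64 bytes are used, which includes the block ID and any trailing
 * alignment padding, not only the text itself.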
*/ 1184b6cf8b3fSJohn Ogness return (DATA_INDEX(data_ring, blk_lpos->next) - 1185b6cf8b3fSJohn Ogness DATA_INDEX(data_ring, blk_lpos->begin)); 1186b6cf8b3fSJohn Ogness } 1187b6cf8b3fSJohn Ogness 1188b6cf8b3fSJohn Ogness /* 1189b6cf8b3fSJohn Ogness * For wrapping data blocks, the trailing (wasted) space is 1190b6cf8b3fSJohn Ogness * also counted. 1191b6cf8b3fSJohn Ogness */ 1192b6cf8b3fSJohn Ogness return (DATA_INDEX(data_ring, blk_lpos->next) + 1193b6cf8b3fSJohn Ogness DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin)); 1194b6cf8b3fSJohn Ogness } 1195b6cf8b3fSJohn Ogness 11962a7f87edSJohn Ogness /* 11972a7f87edSJohn Ogness * Given @blk_lpos, return a pointer to the writer data from the data block 11982a7f87edSJohn Ogness * and calculate the size of the data part. A NULL pointer is returned if 11992a7f87edSJohn Ogness * @blk_lpos specifies values that could never be legal. 12002a7f87edSJohn Ogness * 12012a7f87edSJohn Ogness * This function (used by readers) performs strict validation on the lpos 12022a7f87edSJohn Ogness * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 12032a7f87edSJohn Ogness * triggered if an internal error is detected. 12042a7f87edSJohn Ogness */ 12052a7f87edSJohn Ogness static const char *get_data(struct prb_data_ring *data_ring, 12062a7f87edSJohn Ogness struct prb_data_blk_lpos *blk_lpos, 12072a7f87edSJohn Ogness unsigned int *data_size) 12082a7f87edSJohn Ogness { 12092a7f87edSJohn Ogness struct prb_data_block *db; 12102a7f87edSJohn Ogness 12112a7f87edSJohn Ogness /* Data-less data block description. */ 1212e3bc0401SJohn Ogness if (BLK_DATALESS(blk_lpos)) { 12132a7f87edSJohn Ogness if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) { 12142a7f87edSJohn Ogness *data_size = 0; 12152a7f87edSJohn Ogness return ""; 12162a7f87edSJohn Ogness } 12172a7f87edSJohn Ogness return NULL; 12182a7f87edSJohn Ogness } 12192a7f87edSJohn Ogness 12202a7f87edSJohn Ogness /* Regular data block: @begin less than @next and in same wrap. */ 12212a7f87edSJohn Ogness if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && 12222a7f87edSJohn Ogness blk_lpos->begin < blk_lpos->next) { 12232a7f87edSJohn Ogness db = to_block(data_ring, blk_lpos->begin); 12242a7f87edSJohn Ogness *data_size = blk_lpos->next - blk_lpos->begin; 12252a7f87edSJohn Ogness 12262a7f87edSJohn Ogness /* Wrapping data block: @begin is one wrap behind @next. */ 12272a7f87edSJohn Ogness } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == 12282a7f87edSJohn Ogness DATA_WRAPS(data_ring, blk_lpos->next)) { 12292a7f87edSJohn Ogness db = to_block(data_ring, 0); 12302a7f87edSJohn Ogness *data_size = DATA_INDEX(data_ring, blk_lpos->next); 12312a7f87edSJohn Ogness 12322a7f87edSJohn Ogness /* Illegal block description. */ 12332a7f87edSJohn Ogness } else { 12342a7f87edSJohn Ogness WARN_ON_ONCE(1); 12352a7f87edSJohn Ogness return NULL; 12362a7f87edSJohn Ogness } 12372a7f87edSJohn Ogness 12382a7f87edSJohn Ogness /* A valid data block will always be aligned to the ID size. */ 12392a7f87edSJohn Ogness if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) || 12402a7f87edSJohn Ogness WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) { 12412a7f87edSJohn Ogness return NULL; 12422a7f87edSJohn Ogness } 12432a7f87edSJohn Ogness 12442a7f87edSJohn Ogness /* A valid data block will always have at least an ID. 
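 * A data block is laid out as the block ID immediately followed by the
 * writer data, so the smallest legal block is a bare ID with no writer
 * data. Anything smaller indicates an illegal blk_lpos.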
*/ 12452a7f87edSJohn Ogness if (WARN_ON_ONCE(*data_size < sizeof(db->id))) 12462a7f87edSJohn Ogness return NULL; 12472a7f87edSJohn Ogness 12482a7f87edSJohn Ogness /* Subtract block ID space from size to reflect data size. */ 12492a7f87edSJohn Ogness *data_size -= sizeof(db->id); 12502a7f87edSJohn Ogness 12512a7f87edSJohn Ogness return &db->data[0]; 12522a7f87edSJohn Ogness } 12532a7f87edSJohn Ogness 12544cfc7258SJohn Ogness /* 12554cfc7258SJohn Ogness * Attempt to transition the newest descriptor from committed back to reserved 12564cfc7258SJohn Ogness * so that the record can be modified by a writer again. This is only possible 12574cfc7258SJohn Ogness * if the descriptor is not yet finalized and the provided @caller_id matches. 12584cfc7258SJohn Ogness */ 12594cfc7258SJohn Ogness static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring, 12604cfc7258SJohn Ogness u32 caller_id, unsigned long *id_out) 12614cfc7258SJohn Ogness { 12624cfc7258SJohn Ogness unsigned long prev_state_val; 12634cfc7258SJohn Ogness enum desc_state d_state; 12644cfc7258SJohn Ogness struct prb_desc desc; 12654cfc7258SJohn Ogness struct prb_desc *d; 12664cfc7258SJohn Ogness unsigned long id; 1267cfe2790bSJohn Ogness u32 cid; 12684cfc7258SJohn Ogness 12694cfc7258SJohn Ogness id = atomic_long_read(&desc_ring->head_id); 12704cfc7258SJohn Ogness 12714cfc7258SJohn Ogness /* 12724cfc7258SJohn Ogness * To reduce unnecessarily reopening, first check if the descriptor 12734cfc7258SJohn Ogness * state and caller ID are correct. 12744cfc7258SJohn Ogness */ 1275cfe2790bSJohn Ogness d_state = desc_read(desc_ring, id, &desc, NULL, &cid); 1276cfe2790bSJohn Ogness if (d_state != desc_committed || cid != caller_id) 12774cfc7258SJohn Ogness return NULL; 12784cfc7258SJohn Ogness 12794cfc7258SJohn Ogness d = to_desc(desc_ring, id); 12804cfc7258SJohn Ogness 12814cfc7258SJohn Ogness prev_state_val = DESC_SV(id, desc_committed); 12824cfc7258SJohn Ogness 12834cfc7258SJohn Ogness /* 12844cfc7258SJohn Ogness * Guarantee the reserved state is stored before reading any 12854cfc7258SJohn Ogness * record data. A full memory barrier is needed because @state_var 12864cfc7258SJohn Ogness * modification is followed by reading. This pairs with _prb_commit:B. 12874cfc7258SJohn Ogness * 12884cfc7258SJohn Ogness * Memory barrier involvement: 12894cfc7258SJohn Ogness * 12904cfc7258SJohn Ogness * If desc_reopen_last:A reads from _prb_commit:B, then 12914cfc7258SJohn Ogness * prb_reserve_in_last:A reads from _prb_commit:A. 12924cfc7258SJohn Ogness * 12934cfc7258SJohn Ogness * Relies on: 12944cfc7258SJohn Ogness * 12954cfc7258SJohn Ogness * WMB from _prb_commit:A to _prb_commit:B 12964cfc7258SJohn Ogness * matching 12974cfc7258SJohn Ogness * MB If desc_reopen_last:A to prb_reserve_in_last:A 12984cfc7258SJohn Ogness */ 12994cfc7258SJohn Ogness if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, 13004cfc7258SJohn Ogness DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */ 13014cfc7258SJohn Ogness return NULL; 13024cfc7258SJohn Ogness } 13034cfc7258SJohn Ogness 13044cfc7258SJohn Ogness *id_out = id; 13054cfc7258SJohn Ogness return d; 13064cfc7258SJohn Ogness } 13074cfc7258SJohn Ogness 13084cfc7258SJohn Ogness /** 13094cfc7258SJohn Ogness * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer 13104cfc7258SJohn Ogness * used by the newest record. 13114cfc7258SJohn Ogness * 13124cfc7258SJohn Ogness * @e: The entry structure to setup. 13134cfc7258SJohn Ogness * @rb: The ringbuffer to re-reserve and extend data in. 
13144cfc7258SJohn Ogness * @r: The record structure to allocate buffers for. 13154cfc7258SJohn Ogness * @caller_id: The caller ID of the caller (reserving writer). 131659f8bccaSJohn Ogness * @max_size: Fail if the extended size would be greater than this. 13174cfc7258SJohn Ogness * 13184cfc7258SJohn Ogness * This is the public function available to writers to re-reserve and extend 13194cfc7258SJohn Ogness * data. 13204cfc7258SJohn Ogness * 13214cfc7258SJohn Ogness * The writer specifies the text size to extend (not the new total size) by 1322f35efc78SJohn Ogness * setting the @text_buf_size field of @r. To ensure proper initialization 1323f35efc78SJohn Ogness * of @r, prb_rec_init_wr() should be used. 13244cfc7258SJohn Ogness * 13254cfc7258SJohn Ogness * This function will fail if @caller_id does not match the caller ID of the 13264cfc7258SJohn Ogness * newest record. In that case the caller must reserve new data using 13274cfc7258SJohn Ogness * prb_reserve(). 13284cfc7258SJohn Ogness * 13294cfc7258SJohn Ogness * Context: Any context. Disables local interrupts on success. 13304cfc7258SJohn Ogness * Return: true if text data could be extended, otherwise false. 13314cfc7258SJohn Ogness * 13324cfc7258SJohn Ogness * On success: 13334cfc7258SJohn Ogness * 13344cfc7258SJohn Ogness * - @r->text_buf points to the beginning of the entire text buffer. 13354cfc7258SJohn Ogness * 13364cfc7258SJohn Ogness * - @r->text_buf_size is set to the new total size of the buffer. 13374cfc7258SJohn Ogness * 13384cfc7258SJohn Ogness * - @r->info is not touched so that @r->info->text_len could be used 13394cfc7258SJohn Ogness * to append the text. 13404cfc7258SJohn Ogness * 13414cfc7258SJohn Ogness * - prb_record_text_space() can be used on @e to query the new 13424cfc7258SJohn Ogness * actually used space. 13434cfc7258SJohn Ogness * 13444cfc7258SJohn Ogness * Important: All @r->info fields will already be set with the current values 13454cfc7258SJohn Ogness * for the record. I.e. @r->info->text_len will be less than 1346f35efc78SJohn Ogness * @text_buf_size. Writers can use @r->info->text_len to know 13474cfc7258SJohn Ogness * where concatenation begins and writers should update 13484cfc7258SJohn Ogness * @r->info->text_len after concatenating. 13494cfc7258SJohn Ogness */ 13504cfc7258SJohn Ogness bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, 135159f8bccaSJohn Ogness struct printk_record *r, u32 caller_id, unsigned int max_size) 13524cfc7258SJohn Ogness { 1353cfe2790bSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1354cfe2790bSJohn Ogness struct printk_info *info; 13554cfc7258SJohn Ogness unsigned int data_size; 13564cfc7258SJohn Ogness struct prb_desc *d; 13574cfc7258SJohn Ogness unsigned long id; 13584cfc7258SJohn Ogness 13594cfc7258SJohn Ogness local_irq_save(e->irqflags); 13604cfc7258SJohn Ogness 13614cfc7258SJohn Ogness /* Transition the newest descriptor back to the reserved state. 
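 * If this fails (for example because the record has already been
 * finalized or belongs to a different caller), the re-reserve is
 * abandoned and the caller must use prb_reserve() instead.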
*/ 1362cfe2790bSJohn Ogness d = desc_reopen_last(desc_ring, caller_id, &id); 13634cfc7258SJohn Ogness if (!d) { 13644cfc7258SJohn Ogness local_irq_restore(e->irqflags); 13654cfc7258SJohn Ogness goto fail_reopen; 13664cfc7258SJohn Ogness } 13674cfc7258SJohn Ogness 13684cfc7258SJohn Ogness /* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */ 13694cfc7258SJohn Ogness 1370cfe2790bSJohn Ogness info = to_info(desc_ring, id); 1371cfe2790bSJohn Ogness 13724cfc7258SJohn Ogness /* 13734cfc7258SJohn Ogness * Set the @e fields here so that prb_commit() can be used if 13744cfc7258SJohn Ogness * anything fails from now on. 13754cfc7258SJohn Ogness */ 13764cfc7258SJohn Ogness e->rb = rb; 13774cfc7258SJohn Ogness e->id = id; 13784cfc7258SJohn Ogness 13794cfc7258SJohn Ogness /* 13804cfc7258SJohn Ogness * desc_reopen_last() checked the caller_id, but there was no 13814cfc7258SJohn Ogness * exclusive access at that point. The descriptor may have 13824cfc7258SJohn Ogness * changed since then. 13834cfc7258SJohn Ogness */ 1384cfe2790bSJohn Ogness if (caller_id != info->caller_id) 13854cfc7258SJohn Ogness goto fail; 13864cfc7258SJohn Ogness 13874cfc7258SJohn Ogness if (BLK_DATALESS(&d->text_blk_lpos)) { 1388cfe2790bSJohn Ogness if (WARN_ON_ONCE(info->text_len != 0)) { 13894cfc7258SJohn Ogness pr_warn_once("wrong text_len value (%hu, expecting 0)\n", 1390cfe2790bSJohn Ogness info->text_len); 1391cfe2790bSJohn Ogness info->text_len = 0; 13924cfc7258SJohn Ogness } 13934cfc7258SJohn Ogness 13944cfc7258SJohn Ogness if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) 13954cfc7258SJohn Ogness goto fail; 13964cfc7258SJohn Ogness 139759f8bccaSJohn Ogness if (r->text_buf_size > max_size) 139859f8bccaSJohn Ogness goto fail; 139959f8bccaSJohn Ogness 1400584da076SNikolay Borisov r->text_buf = data_alloc(rb, r->text_buf_size, 14014cfc7258SJohn Ogness &d->text_blk_lpos, id); 14024cfc7258SJohn Ogness } else { 14034cfc7258SJohn Ogness if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size)) 14044cfc7258SJohn Ogness goto fail; 14054cfc7258SJohn Ogness 14064cfc7258SJohn Ogness /* 14074cfc7258SJohn Ogness * Increase the buffer size to include the original size. If 14084cfc7258SJohn Ogness * the meta data (@text_len) is not sane, use the full data 14094cfc7258SJohn Ogness * block size. 
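 *
 * For example (illustrative values): if the record currently holds 40
 * bytes of text (@text_len == 40) and the writer asked to extend it by
 * 32 bytes, @text_buf_size becomes 72, the data block is resized to
 * hold all 72 bytes and the writer appends its text at offset 40.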
14104cfc7258SJohn Ogness */ 1411cfe2790bSJohn Ogness if (WARN_ON_ONCE(info->text_len > data_size)) { 14124cfc7258SJohn Ogness pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n", 1413cfe2790bSJohn Ogness info->text_len, data_size); 1414cfe2790bSJohn Ogness info->text_len = data_size; 14154cfc7258SJohn Ogness } 1416cfe2790bSJohn Ogness r->text_buf_size += info->text_len; 14174cfc7258SJohn Ogness 14184cfc7258SJohn Ogness if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) 14194cfc7258SJohn Ogness goto fail; 14204cfc7258SJohn Ogness 142159f8bccaSJohn Ogness if (r->text_buf_size > max_size) 142259f8bccaSJohn Ogness goto fail; 142359f8bccaSJohn Ogness 1424584da076SNikolay Borisov r->text_buf = data_realloc(rb, r->text_buf_size, 14254cfc7258SJohn Ogness &d->text_blk_lpos, id); 14264cfc7258SJohn Ogness } 14274cfc7258SJohn Ogness if (r->text_buf_size && !r->text_buf) 14284cfc7258SJohn Ogness goto fail; 14294cfc7258SJohn Ogness 1430cfe2790bSJohn Ogness r->info = info; 14314cfc7258SJohn Ogness 14324cfc7258SJohn Ogness e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); 14334cfc7258SJohn Ogness 14344cfc7258SJohn Ogness return true; 14354cfc7258SJohn Ogness fail: 14364cfc7258SJohn Ogness prb_commit(e); 14374cfc7258SJohn Ogness /* prb_commit() re-enabled interrupts. */ 14384cfc7258SJohn Ogness fail_reopen: 14394cfc7258SJohn Ogness /* Make it clear to the caller that the re-reserve failed. */ 14404cfc7258SJohn Ogness memset(r, 0, sizeof(*r)); 14414cfc7258SJohn Ogness return false; 14424cfc7258SJohn Ogness } 14434cfc7258SJohn Ogness 14444cfc7258SJohn Ogness /* 14454cfc7258SJohn Ogness * Attempt to finalize a specified descriptor. If this fails, the descriptor 14464cfc7258SJohn Ogness * is either already final or it will finalize itself when the writer commits. 14474cfc7258SJohn Ogness */ 14484cfc7258SJohn Ogness static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id) 14494cfc7258SJohn Ogness { 14504cfc7258SJohn Ogness unsigned long prev_state_val = DESC_SV(id, desc_committed); 14514cfc7258SJohn Ogness struct prb_desc *d = to_desc(desc_ring, id); 14524cfc7258SJohn Ogness 14534cfc7258SJohn Ogness atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val, 14544cfc7258SJohn Ogness DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */ 1455*f244b4dcSPetr Mladek 1456*f244b4dcSPetr Mladek /* Best effort to remember the last finalized @id. */ 1457*f244b4dcSPetr Mladek atomic_long_set(&desc_ring->last_finalized_id, id); 14584cfc7258SJohn Ogness } 14594cfc7258SJohn Ogness 1460b6cf8b3fSJohn Ogness /** 1461b6cf8b3fSJohn Ogness * prb_reserve() - Reserve space in the ringbuffer. 1462b6cf8b3fSJohn Ogness * 1463b6cf8b3fSJohn Ogness * @e: The entry structure to setup. 1464b6cf8b3fSJohn Ogness * @rb: The ringbuffer to reserve data in. 1465b6cf8b3fSJohn Ogness * @r: The record structure to allocate buffers for. 1466b6cf8b3fSJohn Ogness * 1467b6cf8b3fSJohn Ogness * This is the public function available to writers to reserve data. 1468b6cf8b3fSJohn Ogness * 1469f35efc78SJohn Ogness * The writer specifies the text size to reserve by setting the 1470f35efc78SJohn Ogness * @text_buf_size field of @r. To ensure proper initialization of @r, 1471f35efc78SJohn Ogness * prb_rec_init_wr() should be used. 1472b6cf8b3fSJohn Ogness * 1473b6cf8b3fSJohn Ogness * Context: Any context. Disables local interrupts on success. 1474b6cf8b3fSJohn Ogness * Return: true if at least text data could be allocated, otherwise false. 
1475b6cf8b3fSJohn Ogness * 1476f35efc78SJohn Ogness * On success, the fields @info and @text_buf of @r will be set by this 1477f35efc78SJohn Ogness * function and should be filled in by the writer before committing. Also 1478b6cf8b3fSJohn Ogness * on success, prb_record_text_space() can be used on @e to query the actual 1479b6cf8b3fSJohn Ogness * space used for the text data block. 1480b6cf8b3fSJohn Ogness * 1481f35efc78SJohn Ogness * Important: @info->text_len needs to be set correctly by the writer in 1482f35efc78SJohn Ogness * order for data to be readable and/or extended. Its value 1483f35efc78SJohn Ogness * is initialized to 0. 1484b6cf8b3fSJohn Ogness */ 1485b6cf8b3fSJohn Ogness bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, 1486b6cf8b3fSJohn Ogness struct printk_record *r) 1487b6cf8b3fSJohn Ogness { 1488b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1489cfe2790bSJohn Ogness struct printk_info *info; 1490b6cf8b3fSJohn Ogness struct prb_desc *d; 1491b6cf8b3fSJohn Ogness unsigned long id; 1492cc5c7041SJohn Ogness u64 seq; 1493b6cf8b3fSJohn Ogness 1494b6cf8b3fSJohn Ogness if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) 1495b6cf8b3fSJohn Ogness goto fail; 1496b6cf8b3fSJohn Ogness 1497b6cf8b3fSJohn Ogness /* 1498b6cf8b3fSJohn Ogness * Descriptors in the reserved state act as blockers to all further 1499b6cf8b3fSJohn Ogness * reservations once the desc_ring has fully wrapped. Disable 1500b6cf8b3fSJohn Ogness * interrupts during the reserve/commit window in order to minimize 1501b6cf8b3fSJohn Ogness * the likelihood of this happening. 1502b6cf8b3fSJohn Ogness */ 1503b6cf8b3fSJohn Ogness local_irq_save(e->irqflags); 1504b6cf8b3fSJohn Ogness 1505b6cf8b3fSJohn Ogness if (!desc_reserve(rb, &id)) { 1506b6cf8b3fSJohn Ogness /* Descriptor reservation failures are tracked. */ 1507b6cf8b3fSJohn Ogness atomic_long_inc(&rb->fail); 1508b6cf8b3fSJohn Ogness local_irq_restore(e->irqflags); 1509b6cf8b3fSJohn Ogness goto fail; 1510b6cf8b3fSJohn Ogness } 1511b6cf8b3fSJohn Ogness 1512b6cf8b3fSJohn Ogness d = to_desc(desc_ring, id); 1513cfe2790bSJohn Ogness info = to_info(desc_ring, id); 1514b6cf8b3fSJohn Ogness 1515b6cf8b3fSJohn Ogness /* 1516cc5c7041SJohn Ogness * All @info fields (except @seq) are cleared and must be filled in 1517cc5c7041SJohn Ogness * by the writer. Save @seq before clearing because it is used to 1518cc5c7041SJohn Ogness * determine the new sequence number. 1519cc5c7041SJohn Ogness */ 1520cfe2790bSJohn Ogness seq = info->seq; 1521cfe2790bSJohn Ogness memset(info, 0, sizeof(*info)); 1522cc5c7041SJohn Ogness 1523cc5c7041SJohn Ogness /* 1524b6cf8b3fSJohn Ogness * Set the @e fields here so that prb_commit() can be used if 1525b6cf8b3fSJohn Ogness * text data allocation fails. 1526b6cf8b3fSJohn Ogness */ 1527b6cf8b3fSJohn Ogness e->rb = rb; 1528b6cf8b3fSJohn Ogness e->id = id; 1529b6cf8b3fSJohn Ogness 1530b6cf8b3fSJohn Ogness /* 1531b6cf8b3fSJohn Ogness * Initialize the sequence number if it has "never been set". 1532b6cf8b3fSJohn Ogness * Otherwise just increment it by a full wrap. 1533b6cf8b3fSJohn Ogness * 1534b6cf8b3fSJohn Ogness * @seq is considered "never been set" if it has a value of 0, 1535cfe2790bSJohn Ogness * _except_ for @infos[0], which was specially setup by the ringbuffer 1536b6cf8b3fSJohn Ogness * initializer and therefore is always considered as set. 
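 *
 * For example (illustrative, assuming an 8-descriptor ring): the
 * descriptor at index 3 is first used with seq 3, then seq 11, 19 and
 * so on, while the descriptor at index 0 yields seq 0 on its first use
 * because its initial value was specially pre-set by the initializer.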
1537b6cf8b3fSJohn Ogness * 1538b6cf8b3fSJohn Ogness * See the "Bootstrap" comment block in printk_ringbuffer.h for 1539b6cf8b3fSJohn Ogness * details about how the initializer bootstraps the descriptors. 1540b6cf8b3fSJohn Ogness */ 1541cc5c7041SJohn Ogness if (seq == 0 && DESC_INDEX(desc_ring, id) != 0) 1542cfe2790bSJohn Ogness info->seq = DESC_INDEX(desc_ring, id); 1543b6cf8b3fSJohn Ogness else 1544cfe2790bSJohn Ogness info->seq = seq + DESCS_COUNT(desc_ring); 1545b6cf8b3fSJohn Ogness 15464cfc7258SJohn Ogness /* 15474cfc7258SJohn Ogness * New data is about to be reserved. Once that happens, previous 15484cfc7258SJohn Ogness * descriptors are no longer able to be extended. Finalize the 15494cfc7258SJohn Ogness * previous descriptor now so that it can be made available to 15504cfc7258SJohn Ogness * readers. (For seq==0 there is no previous descriptor.) 15514cfc7258SJohn Ogness */ 1552cfe2790bSJohn Ogness if (info->seq > 0) 15534cfc7258SJohn Ogness desc_make_final(desc_ring, DESC_ID(id - 1)); 15544cfc7258SJohn Ogness 1555584da076SNikolay Borisov r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id); 1556b6cf8b3fSJohn Ogness /* If text data allocation fails, a data-less record is committed. */ 1557b6cf8b3fSJohn Ogness if (r->text_buf_size && !r->text_buf) { 1558b6cf8b3fSJohn Ogness prb_commit(e); 1559b6cf8b3fSJohn Ogness /* prb_commit() re-enabled interrupts. */ 1560b6cf8b3fSJohn Ogness goto fail; 1561b6cf8b3fSJohn Ogness } 1562b6cf8b3fSJohn Ogness 1563cfe2790bSJohn Ogness r->info = info; 1564b6cf8b3fSJohn Ogness 1565b6cf8b3fSJohn Ogness /* Record full text space used by record. */ 1566b6cf8b3fSJohn Ogness e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); 1567b6cf8b3fSJohn Ogness 1568b6cf8b3fSJohn Ogness return true; 1569b6cf8b3fSJohn Ogness fail: 1570b6cf8b3fSJohn Ogness /* Make it clear to the caller that the reserve failed. */ 1571b6cf8b3fSJohn Ogness memset(r, 0, sizeof(*r)); 1572b6cf8b3fSJohn Ogness return false; 1573b6cf8b3fSJohn Ogness } 1574b6cf8b3fSJohn Ogness 15754cfc7258SJohn Ogness /* Commit the data (possibly finalizing it) and restore interrupts. */ 15764cfc7258SJohn Ogness static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val) 15774cfc7258SJohn Ogness { 15784cfc7258SJohn Ogness struct prb_desc_ring *desc_ring = &e->rb->desc_ring; 15794cfc7258SJohn Ogness struct prb_desc *d = to_desc(desc_ring, e->id); 15804cfc7258SJohn Ogness unsigned long prev_state_val = DESC_SV(e->id, desc_reserved); 15814cfc7258SJohn Ogness 15824cfc7258SJohn Ogness /* Now the writer has finished all writing: LMM(_prb_commit:A) */ 15834cfc7258SJohn Ogness 15844cfc7258SJohn Ogness /* 15854cfc7258SJohn Ogness * Set the descriptor as committed. See "ABA Issues" about why 15864cfc7258SJohn Ogness * cmpxchg() instead of set() is used. 15874cfc7258SJohn Ogness * 15884cfc7258SJohn Ogness * 1 Guarantee all record data is stored before the descriptor state 15894cfc7258SJohn Ogness * is stored as committed. A write memory barrier is sufficient 15904cfc7258SJohn Ogness * for this. This pairs with desc_read:B and desc_reopen_last:A. 15914cfc7258SJohn Ogness * 15924cfc7258SJohn Ogness * 2. Guarantee the descriptor state is stored as committed before 15934cfc7258SJohn Ogness * re-checking the head ID in order to possibly finalize this 15944cfc7258SJohn Ogness * descriptor. This pairs with desc_reserve:D. 
15954cfc7258SJohn Ogness * 15964cfc7258SJohn Ogness * Memory barrier involvement: 15974cfc7258SJohn Ogness * 15984cfc7258SJohn Ogness * If prb_commit:A reads from desc_reserve:D, then 15994cfc7258SJohn Ogness * desc_make_final:A reads from _prb_commit:B. 16004cfc7258SJohn Ogness * 16014cfc7258SJohn Ogness * Relies on: 16024cfc7258SJohn Ogness * 16034cfc7258SJohn Ogness * MB _prb_commit:B to prb_commit:A 16044cfc7258SJohn Ogness * matching 16054cfc7258SJohn Ogness * MB desc_reserve:D to desc_make_final:A 16064cfc7258SJohn Ogness */ 16074cfc7258SJohn Ogness if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, 16084cfc7258SJohn Ogness DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */ 16094cfc7258SJohn Ogness WARN_ON_ONCE(1); 16104cfc7258SJohn Ogness } 16114cfc7258SJohn Ogness 16124cfc7258SJohn Ogness /* Restore interrupts, the reserve/commit window is finished. */ 16134cfc7258SJohn Ogness local_irq_restore(e->irqflags); 16144cfc7258SJohn Ogness } 16154cfc7258SJohn Ogness 1616b6cf8b3fSJohn Ogness /** 1617b6cf8b3fSJohn Ogness * prb_commit() - Commit (previously reserved) data to the ringbuffer. 1618b6cf8b3fSJohn Ogness * 1619b6cf8b3fSJohn Ogness * @e: The entry containing the reserved data information. 1620b6cf8b3fSJohn Ogness * 1621b6cf8b3fSJohn Ogness * This is the public function available to writers to commit data. 1622b6cf8b3fSJohn Ogness * 16234cfc7258SJohn Ogness * Note that the data is not yet available to readers until it is finalized. 16244cfc7258SJohn Ogness * Finalizing happens automatically when space for the next record is 16254cfc7258SJohn Ogness * reserved. 16264cfc7258SJohn Ogness * 16274cfc7258SJohn Ogness * See prb_final_commit() for a version of this function that finalizes 16284cfc7258SJohn Ogness * immediately. 16294cfc7258SJohn Ogness * 1630b6cf8b3fSJohn Ogness * Context: Any context. Enables local interrupts. 1631b6cf8b3fSJohn Ogness */ 1632b6cf8b3fSJohn Ogness void prb_commit(struct prb_reserved_entry *e) 1633b6cf8b3fSJohn Ogness { 1634b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &e->rb->desc_ring; 16354cfc7258SJohn Ogness unsigned long head_id; 1636b6cf8b3fSJohn Ogness 16374cfc7258SJohn Ogness _prb_commit(e, desc_committed); 1638b6cf8b3fSJohn Ogness 1639b6cf8b3fSJohn Ogness /* 16404cfc7258SJohn Ogness * If this descriptor is no longer the head (i.e. a new record has 16414cfc7258SJohn Ogness * been allocated), extending the data for this record is no longer 16424cfc7258SJohn Ogness * allowed and therefore it must be finalized. 1643b6cf8b3fSJohn Ogness */ 16444cfc7258SJohn Ogness head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */ 16454cfc7258SJohn Ogness if (head_id != e->id) 16464cfc7258SJohn Ogness desc_make_final(desc_ring, e->id); 1647b6cf8b3fSJohn Ogness } 1648b6cf8b3fSJohn Ogness 16494cfc7258SJohn Ogness /** 16504cfc7258SJohn Ogness * prb_final_commit() - Commit and finalize (previously reserved) data to 16514cfc7258SJohn Ogness * the ringbuffer. 16524cfc7258SJohn Ogness * 16534cfc7258SJohn Ogness * @e: The entry containing the reserved data information. 16544cfc7258SJohn Ogness * 16554cfc7258SJohn Ogness * This is the public function available to writers to commit+finalize data. 16564cfc7258SJohn Ogness * 16574cfc7258SJohn Ogness * By finalizing, the data is made immediately available to readers. 16584cfc7258SJohn Ogness * 16594cfc7258SJohn Ogness * This function should only be used if there are no intentions of extending 16604cfc7258SJohn Ogness * this data using prb_reserve_in_last(). 
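 *
 * A minimal writer sketch (illustrative only; @rb is assumed to be an
 * initialized ringbuffer and @text a NUL-terminated message):
 *
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	prb_rec_init_wr(&r, strlen(text) + 1);
 *
 *	if (prb_reserve(&e, rb, &r)) {
 *		snprintf(&r.text_buf[0], r.text_buf_size, "%s", text);
 *		r.info->text_len = strlen(text);
 *		prb_final_commit(&e);
 *	}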
16614cfc7258SJohn Ogness * 16624cfc7258SJohn Ogness * Context: Any context. Enables local interrupts. 16634cfc7258SJohn Ogness */ 16644cfc7258SJohn Ogness void prb_final_commit(struct prb_reserved_entry *e) 16654cfc7258SJohn Ogness { 1666*f244b4dcSPetr Mladek struct prb_desc_ring *desc_ring = &e->rb->desc_ring; 1667*f244b4dcSPetr Mladek 16684cfc7258SJohn Ogness _prb_commit(e, desc_finalized); 1669*f244b4dcSPetr Mladek 1670*f244b4dcSPetr Mladek /* Best effort to remember the last finalized @id. */ 1671*f244b4dcSPetr Mladek atomic_long_set(&desc_ring->last_finalized_id, e->id); 1672b6cf8b3fSJohn Ogness } 1673b6cf8b3fSJohn Ogness 1674b6cf8b3fSJohn Ogness /* 1675b6cf8b3fSJohn Ogness * Count the number of lines in provided text. All text has at least 1 line 1676b6cf8b3fSJohn Ogness * (even if @text_size is 0). Each '\n' processed is counted as an additional 1677b6cf8b3fSJohn Ogness * line. 1678b6cf8b3fSJohn Ogness */ 1679d397820fSJohn Ogness static unsigned int count_lines(const char *text, unsigned int text_size) 1680b6cf8b3fSJohn Ogness { 1681b6cf8b3fSJohn Ogness unsigned int next_size = text_size; 1682b6cf8b3fSJohn Ogness unsigned int line_count = 1; 1683d397820fSJohn Ogness const char *next = text; 1684b6cf8b3fSJohn Ogness 1685b6cf8b3fSJohn Ogness while (next_size) { 1686b6cf8b3fSJohn Ogness next = memchr(next, '\n', next_size); 1687b6cf8b3fSJohn Ogness if (!next) 1688b6cf8b3fSJohn Ogness break; 1689b6cf8b3fSJohn Ogness line_count++; 1690b6cf8b3fSJohn Ogness next++; 1691b6cf8b3fSJohn Ogness next_size = text_size - (next - text); 1692b6cf8b3fSJohn Ogness } 1693b6cf8b3fSJohn Ogness 1694b6cf8b3fSJohn Ogness return line_count; 1695b6cf8b3fSJohn Ogness } 1696b6cf8b3fSJohn Ogness 1697b6cf8b3fSJohn Ogness /* 1698b6cf8b3fSJohn Ogness * Given @blk_lpos, copy an expected @len of data into the provided buffer. 1699b6cf8b3fSJohn Ogness * If @line_count is provided, count the number of lines in the data. 1700b6cf8b3fSJohn Ogness * 1701b6cf8b3fSJohn Ogness * This function (used by readers) performs strict validation on the data 1702b6cf8b3fSJohn Ogness * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 1703b6cf8b3fSJohn Ogness * triggered if an internal error is detected. 1704b6cf8b3fSJohn Ogness */ 1705b6cf8b3fSJohn Ogness static bool copy_data(struct prb_data_ring *data_ring, 1706b6cf8b3fSJohn Ogness struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf, 1707b6cf8b3fSJohn Ogness unsigned int buf_size, unsigned int *line_count) 1708b6cf8b3fSJohn Ogness { 1709b6cf8b3fSJohn Ogness unsigned int data_size; 1710d397820fSJohn Ogness const char *data; 1711b6cf8b3fSJohn Ogness 1712b6cf8b3fSJohn Ogness /* Caller might not want any data. */ 1713b6cf8b3fSJohn Ogness if ((!buf || !buf_size) && !line_count) 1714b6cf8b3fSJohn Ogness return true; 1715b6cf8b3fSJohn Ogness 1716b6cf8b3fSJohn Ogness data = get_data(data_ring, blk_lpos, &data_size); 1717b6cf8b3fSJohn Ogness if (!data) 1718b6cf8b3fSJohn Ogness return false; 1719b6cf8b3fSJohn Ogness 1720b6cf8b3fSJohn Ogness /* 1721b6cf8b3fSJohn Ogness * Actual cannot be less than expected. It can be more than expected 1722b6cf8b3fSJohn Ogness * because of the trailing alignment padding. 1723cfe2790bSJohn Ogness * 1724cfe2790bSJohn Ogness * Note that invalid @len values can occur because the caller loads 1725cfe2790bSJohn Ogness * the value during an allowed data race. 
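 *
 * For example (illustrative, assuming 8-byte alignment of data blocks):
 * a record whose text is 13 bytes may occupy a data block whose data
 * area reads back as 16 bytes, so @data_size is 16 while @len is 13 and
 * the check below still passes.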
1726b6cf8b3fSJohn Ogness */ 1727cfe2790bSJohn Ogness if (data_size < (unsigned int)len) 1728b6cf8b3fSJohn Ogness return false; 1729b6cf8b3fSJohn Ogness 1730b6cf8b3fSJohn Ogness /* Caller interested in the line count? */ 1731b6cf8b3fSJohn Ogness if (line_count) 1732668af87fSJohn Ogness *line_count = count_lines(data, len); 1733b6cf8b3fSJohn Ogness 1734b6cf8b3fSJohn Ogness /* Caller interested in the data content? */ 1735b6cf8b3fSJohn Ogness if (!buf || !buf_size) 1736b6cf8b3fSJohn Ogness return true; 1737b6cf8b3fSJohn Ogness 1738b6cf8b3fSJohn Ogness data_size = min_t(u16, buf_size, len); 1739b6cf8b3fSJohn Ogness 1740b6cf8b3fSJohn Ogness memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */ 1741b6cf8b3fSJohn Ogness return true; 1742b6cf8b3fSJohn Ogness } 1743b6cf8b3fSJohn Ogness 1744b6cf8b3fSJohn Ogness /* 1745b6cf8b3fSJohn Ogness * This is an extended version of desc_read(). It gets a copy of a specified 17464cfc7258SJohn Ogness * descriptor. However, it also verifies that the record is finalized and has 1747b6cf8b3fSJohn Ogness * the sequence number @seq. On success, 0 is returned. 1748b6cf8b3fSJohn Ogness * 1749b6cf8b3fSJohn Ogness * Error return values: 17504cfc7258SJohn Ogness * -EINVAL: A finalized record with sequence number @seq does not exist. 17514cfc7258SJohn Ogness * -ENOENT: A finalized record with sequence number @seq exists, but its data 1752b6cf8b3fSJohn Ogness * is not available. This is a valid record, so readers should 1753b6cf8b3fSJohn Ogness * continue with the next record. 1754b6cf8b3fSJohn Ogness */ 17554cfc7258SJohn Ogness static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring, 1756b6cf8b3fSJohn Ogness unsigned long id, u64 seq, 1757b6cf8b3fSJohn Ogness struct prb_desc *desc_out) 1758b6cf8b3fSJohn Ogness { 1759b6cf8b3fSJohn Ogness struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos; 1760b6cf8b3fSJohn Ogness enum desc_state d_state; 1761cfe2790bSJohn Ogness u64 s; 1762b6cf8b3fSJohn Ogness 1763cfe2790bSJohn Ogness d_state = desc_read(desc_ring, id, desc_out, &s, NULL); 1764b6cf8b3fSJohn Ogness 1765b6cf8b3fSJohn Ogness /* 1766b6cf8b3fSJohn Ogness * An unexpected @id (desc_miss) or @seq mismatch means the record 17674cfc7258SJohn Ogness * does not exist. A descriptor in the reserved or committed state 17684cfc7258SJohn Ogness * means the record does not yet exist for the reader. 1769b6cf8b3fSJohn Ogness */ 1770b6cf8b3fSJohn Ogness if (d_state == desc_miss || 1771b6cf8b3fSJohn Ogness d_state == desc_reserved || 17724cfc7258SJohn Ogness d_state == desc_committed || 1773cfe2790bSJohn Ogness s != seq) { 1774b6cf8b3fSJohn Ogness return -EINVAL; 1775b6cf8b3fSJohn Ogness } 1776b6cf8b3fSJohn Ogness 1777b6cf8b3fSJohn Ogness /* 1778b6cf8b3fSJohn Ogness * A descriptor in the reusable state may no longer have its data 1779d397820fSJohn Ogness * available; report it as existing but with lost data. Or the record 1780d397820fSJohn Ogness * may actually be a record with lost data. 1781b6cf8b3fSJohn Ogness */ 1782b6cf8b3fSJohn Ogness if (d_state == desc_reusable || 1783d397820fSJohn Ogness (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) { 1784b6cf8b3fSJohn Ogness return -ENOENT; 1785b6cf8b3fSJohn Ogness } 1786b6cf8b3fSJohn Ogness 1787b6cf8b3fSJohn Ogness return 0; 1788b6cf8b3fSJohn Ogness } 1789b6cf8b3fSJohn Ogness 1790b6cf8b3fSJohn Ogness /* 1791b6cf8b3fSJohn Ogness * Copy the ringbuffer data from the record with @seq to the provided 1792b6cf8b3fSJohn Ogness * @r buffer. On success, 0 is returned. 
1793b6cf8b3fSJohn Ogness * 17944cfc7258SJohn Ogness * See desc_read_finalized_seq() for error return values. 1795b6cf8b3fSJohn Ogness */ 1796b6cf8b3fSJohn Ogness static int prb_read(struct printk_ringbuffer *rb, u64 seq, 1797b6cf8b3fSJohn Ogness struct printk_record *r, unsigned int *line_count) 1798b6cf8b3fSJohn Ogness { 1799b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1800cfe2790bSJohn Ogness struct printk_info *info = to_info(desc_ring, seq); 1801b6cf8b3fSJohn Ogness struct prb_desc *rdesc = to_desc(desc_ring, seq); 1802b6cf8b3fSJohn Ogness atomic_long_t *state_var = &rdesc->state_var; 1803b6cf8b3fSJohn Ogness struct prb_desc desc; 1804b6cf8b3fSJohn Ogness unsigned long id; 1805b6cf8b3fSJohn Ogness int err; 1806b6cf8b3fSJohn Ogness 1807b6cf8b3fSJohn Ogness /* Extract the ID, used to specify the descriptor to read. */ 1808b6cf8b3fSJohn Ogness id = DESC_ID(atomic_long_read(state_var)); 1809b6cf8b3fSJohn Ogness 1810b6cf8b3fSJohn Ogness /* Get a local copy of the correct descriptor (if available). */ 18114cfc7258SJohn Ogness err = desc_read_finalized_seq(desc_ring, id, seq, &desc); 1812b6cf8b3fSJohn Ogness 1813b6cf8b3fSJohn Ogness /* 1814b6cf8b3fSJohn Ogness * If @r is NULL, the caller is only interested in the availability 1815b6cf8b3fSJohn Ogness * of the record. 1816b6cf8b3fSJohn Ogness */ 1817b6cf8b3fSJohn Ogness if (err || !r) 1818b6cf8b3fSJohn Ogness return err; 1819b6cf8b3fSJohn Ogness 1820b6cf8b3fSJohn Ogness /* If requested, copy meta data. */ 1821b6cf8b3fSJohn Ogness if (r->info) 1822cfe2790bSJohn Ogness memcpy(r->info, info, sizeof(*(r->info))); 1823b6cf8b3fSJohn Ogness 1824b6cf8b3fSJohn Ogness /* Copy text data. If it fails, this is a data-less record. */ 1825cfe2790bSJohn Ogness if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len, 1826b6cf8b3fSJohn Ogness r->text_buf, r->text_buf_size, line_count)) { 1827b6cf8b3fSJohn Ogness return -ENOENT; 1828b6cf8b3fSJohn Ogness } 1829b6cf8b3fSJohn Ogness 18304cfc7258SJohn Ogness /* Ensure the record is still finalized and has the same @seq. */ 18314cfc7258SJohn Ogness return desc_read_finalized_seq(desc_ring, id, seq, &desc); 1832b6cf8b3fSJohn Ogness } 1833b6cf8b3fSJohn Ogness 1834b6cf8b3fSJohn Ogness /* Get the sequence number of the tail descriptor. */ 1835b6cf8b3fSJohn Ogness static u64 prb_first_seq(struct printk_ringbuffer *rb) 1836b6cf8b3fSJohn Ogness { 1837b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1838b6cf8b3fSJohn Ogness enum desc_state d_state; 1839b6cf8b3fSJohn Ogness struct prb_desc desc; 1840b6cf8b3fSJohn Ogness unsigned long id; 1841cfe2790bSJohn Ogness u64 seq; 1842b6cf8b3fSJohn Ogness 1843b6cf8b3fSJohn Ogness for (;;) { 1844b6cf8b3fSJohn Ogness id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */ 1845b6cf8b3fSJohn Ogness 1846cfe2790bSJohn Ogness d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */ 1847b6cf8b3fSJohn Ogness 1848b6cf8b3fSJohn Ogness /* 1849b6cf8b3fSJohn Ogness * This loop will not be infinite because the tail is 18504cfc7258SJohn Ogness * _always_ in the finalized or reusable state. 
1851b6cf8b3fSJohn Ogness */ 18524cfc7258SJohn Ogness if (d_state == desc_finalized || d_state == desc_reusable) 1853b6cf8b3fSJohn Ogness break; 1854b6cf8b3fSJohn Ogness 1855b6cf8b3fSJohn Ogness /* 1856b6cf8b3fSJohn Ogness * Guarantee the last state load from desc_read() is before 1857b6cf8b3fSJohn Ogness * reloading @tail_id in order to see a new tail in the case 1858b6cf8b3fSJohn Ogness * that the descriptor has been recycled. This pairs with 1859b6cf8b3fSJohn Ogness * desc_reserve:D. 1860b6cf8b3fSJohn Ogness * 1861b6cf8b3fSJohn Ogness * Memory barrier involvement: 1862b6cf8b3fSJohn Ogness * 1863b6cf8b3fSJohn Ogness * If prb_first_seq:B reads from desc_reserve:F, then 1864b6cf8b3fSJohn Ogness * prb_first_seq:A reads from desc_push_tail:B. 1865b6cf8b3fSJohn Ogness * 1866b6cf8b3fSJohn Ogness * Relies on: 1867b6cf8b3fSJohn Ogness * 1868b6cf8b3fSJohn Ogness * MB from desc_push_tail:B to desc_reserve:F 1869b6cf8b3fSJohn Ogness * matching 1870b6cf8b3fSJohn Ogness * RMB prb_first_seq:B to prb_first_seq:A 1871b6cf8b3fSJohn Ogness */ 1872b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(prb_first_seq:C) */ 1873b6cf8b3fSJohn Ogness } 1874b6cf8b3fSJohn Ogness 1875cfe2790bSJohn Ogness return seq; 1876b6cf8b3fSJohn Ogness } 1877b6cf8b3fSJohn Ogness 1878b6cf8b3fSJohn Ogness /* 18794cfc7258SJohn Ogness * Non-blocking read of a record. Updates @seq to the last finalized record 18804cfc7258SJohn Ogness * (which may have no data available). 1881b6cf8b3fSJohn Ogness * 1882b6cf8b3fSJohn Ogness * See the description of prb_read_valid() and prb_read_valid_info() 1883b6cf8b3fSJohn Ogness * for details. 1884b6cf8b3fSJohn Ogness */ 1885b6cf8b3fSJohn Ogness static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, 1886b6cf8b3fSJohn Ogness struct printk_record *r, unsigned int *line_count) 1887b6cf8b3fSJohn Ogness { 1888b6cf8b3fSJohn Ogness u64 tail_seq; 1889b6cf8b3fSJohn Ogness int err; 1890b6cf8b3fSJohn Ogness 1891b6cf8b3fSJohn Ogness while ((err = prb_read(rb, *seq, r, line_count))) { 1892b6cf8b3fSJohn Ogness tail_seq = prb_first_seq(rb); 1893b6cf8b3fSJohn Ogness 1894b6cf8b3fSJohn Ogness if (*seq < tail_seq) { 1895b6cf8b3fSJohn Ogness /* 1896b6cf8b3fSJohn Ogness * Behind the tail. Catch up and try again. This 1897b6cf8b3fSJohn Ogness * can happen for -ENOENT and -EINVAL cases. 1898b6cf8b3fSJohn Ogness */ 1899b6cf8b3fSJohn Ogness *seq = tail_seq; 1900b6cf8b3fSJohn Ogness 1901b6cf8b3fSJohn Ogness } else if (err == -ENOENT) { 1902b6cf8b3fSJohn Ogness /* Record exists, but no data available. Skip. */ 1903b6cf8b3fSJohn Ogness (*seq)++; 1904b6cf8b3fSJohn Ogness 1905b6cf8b3fSJohn Ogness } else { 19064cfc7258SJohn Ogness /* Non-existent/non-finalized record. Must stop. */ 1907b6cf8b3fSJohn Ogness return false; 1908b6cf8b3fSJohn Ogness } 1909b6cf8b3fSJohn Ogness } 1910b6cf8b3fSJohn Ogness 1911b6cf8b3fSJohn Ogness return true; 1912b6cf8b3fSJohn Ogness } 1913b6cf8b3fSJohn Ogness 1914b6cf8b3fSJohn Ogness /** 1915b6cf8b3fSJohn Ogness * prb_read_valid() - Non-blocking read of a requested record or (if gone) 1916b6cf8b3fSJohn Ogness * the next available record. 1917b6cf8b3fSJohn Ogness * 1918b6cf8b3fSJohn Ogness * @rb: The ringbuffer to read from. 1919b6cf8b3fSJohn Ogness * @seq: The sequence number of the record to read. 1920b6cf8b3fSJohn Ogness * @r: A record data buffer to store the read record to. 1921b6cf8b3fSJohn Ogness * 1922b6cf8b3fSJohn Ogness * This is the public function available to readers to read a record. 
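 *
 * A minimal reader sketch (illustrative only; @rb is assumed to be an
 * initialized ringbuffer and the text buffer size is arbitrary):
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text[128];
 *	u64 seq = 0;
 *
 *	prb_rec_init_rd(&r, &info, &text[0], sizeof(text));
 *
 *	while (prb_read_valid(rb, seq, &r)) {
 *		// info.seq > seq would indicate dropped records
 *		text[min_t(unsigned int, info.text_len, sizeof(text) - 1)] = '\0';
 *		pr_info("%llu: %s\n", info.seq, &text[0]);
 *		seq = info.seq + 1;
 *	}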
1923b6cf8b3fSJohn Ogness * 1924f35efc78SJohn Ogness * The reader provides the @info and @text_buf buffers of @r to be 1925b6cf8b3fSJohn Ogness * filled in. Any of the buffer pointers can be set to NULL if the reader 1926b6cf8b3fSJohn Ogness * is not interested in that data. To ensure proper initialization of @r, 1927b6cf8b3fSJohn Ogness * prb_rec_init_rd() should be used. 1928b6cf8b3fSJohn Ogness * 1929b6cf8b3fSJohn Ogness * Context: Any context. 1930b6cf8b3fSJohn Ogness * Return: true if a record was read, otherwise false. 1931b6cf8b3fSJohn Ogness * 1932b6cf8b3fSJohn Ogness * On success, the reader must check r->info.seq to see which record was 1933b6cf8b3fSJohn Ogness * actually read. This allows the reader to detect dropped records. 1934b6cf8b3fSJohn Ogness * 1935b6cf8b3fSJohn Ogness * Failure means @seq refers to a not yet written record. 1936b6cf8b3fSJohn Ogness */ 1937b6cf8b3fSJohn Ogness bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, 1938b6cf8b3fSJohn Ogness struct printk_record *r) 1939b6cf8b3fSJohn Ogness { 1940b6cf8b3fSJohn Ogness return _prb_read_valid(rb, &seq, r, NULL); 1941b6cf8b3fSJohn Ogness } 1942b6cf8b3fSJohn Ogness 1943b6cf8b3fSJohn Ogness /** 1944b6cf8b3fSJohn Ogness * prb_read_valid_info() - Non-blocking read of meta data for a requested 1945b6cf8b3fSJohn Ogness * record or (if gone) the next available record. 1946b6cf8b3fSJohn Ogness * 1947b6cf8b3fSJohn Ogness * @rb: The ringbuffer to read from. 1948b6cf8b3fSJohn Ogness * @seq: The sequence number of the record to read. 1949b6cf8b3fSJohn Ogness * @info: A buffer to store the read record meta data to. 1950b6cf8b3fSJohn Ogness * @line_count: A buffer to store the number of lines in the record text. 1951b6cf8b3fSJohn Ogness * 1952b6cf8b3fSJohn Ogness * This is the public function available to readers to read only the 1953b6cf8b3fSJohn Ogness * meta data of a record. 1954b6cf8b3fSJohn Ogness * 1955b6cf8b3fSJohn Ogness * The reader provides the @info, @line_count buffers to be filled in. 1956b6cf8b3fSJohn Ogness * Either of the buffer pointers can be set to NULL if the reader is not 1957b6cf8b3fSJohn Ogness * interested in that data. 1958b6cf8b3fSJohn Ogness * 1959b6cf8b3fSJohn Ogness * Context: Any context. 1960b6cf8b3fSJohn Ogness * Return: true if a record's meta data was read, otherwise false. 1961b6cf8b3fSJohn Ogness * 1962b6cf8b3fSJohn Ogness * On success, the reader must check info->seq to see which record meta data 1963b6cf8b3fSJohn Ogness * was actually read. This allows the reader to detect dropped records. 1964b6cf8b3fSJohn Ogness * 1965b6cf8b3fSJohn Ogness * Failure means @seq refers to a not yet written record. 1966b6cf8b3fSJohn Ogness */ 1967b6cf8b3fSJohn Ogness bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq, 1968b6cf8b3fSJohn Ogness struct printk_info *info, unsigned int *line_count) 1969b6cf8b3fSJohn Ogness { 1970b6cf8b3fSJohn Ogness struct printk_record r; 1971b6cf8b3fSJohn Ogness 1972f35efc78SJohn Ogness prb_rec_init_rd(&r, info, NULL, 0); 1973b6cf8b3fSJohn Ogness 1974b6cf8b3fSJohn Ogness return _prb_read_valid(rb, &seq, &r, line_count); 1975b6cf8b3fSJohn Ogness } 1976b6cf8b3fSJohn Ogness 1977b6cf8b3fSJohn Ogness /** 1978b6cf8b3fSJohn Ogness * prb_first_valid_seq() - Get the sequence number of the oldest available 1979b6cf8b3fSJohn Ogness * record. 1980b6cf8b3fSJohn Ogness * 1981b6cf8b3fSJohn Ogness * @rb: The ringbuffer to get the sequence number from. 
1982b6cf8b3fSJohn Ogness *
1983b6cf8b3fSJohn Ogness * This is the public function available to readers to see what the
1984b6cf8b3fSJohn Ogness * first/oldest valid sequence number is.
1985b6cf8b3fSJohn Ogness *
1986b6cf8b3fSJohn Ogness * This provides readers a starting point to begin iterating the ringbuffer.
1987b6cf8b3fSJohn Ogness *
1988b6cf8b3fSJohn Ogness * Context: Any context.
1989b6cf8b3fSJohn Ogness * Return: The sequence number of the first/oldest record or, if the
1990b6cf8b3fSJohn Ogness * ringbuffer is empty, 0.
1991b6cf8b3fSJohn Ogness */
1992b6cf8b3fSJohn Ogness u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
1993b6cf8b3fSJohn Ogness {
1994b6cf8b3fSJohn Ogness u64 seq = 0;
1995b6cf8b3fSJohn Ogness
1996b6cf8b3fSJohn Ogness if (!_prb_read_valid(rb, &seq, NULL, NULL))
1997b6cf8b3fSJohn Ogness return 0;
1998b6cf8b3fSJohn Ogness
1999b6cf8b3fSJohn Ogness return seq;
2000b6cf8b3fSJohn Ogness }
2001b6cf8b3fSJohn Ogness
2002b6cf8b3fSJohn Ogness /**
2003b6cf8b3fSJohn Ogness * prb_next_seq() - Get the sequence number after the last available record.
2004b6cf8b3fSJohn Ogness *
2005b6cf8b3fSJohn Ogness * @rb: The ringbuffer to get the sequence number from.
2006b6cf8b3fSJohn Ogness *
2007b6cf8b3fSJohn Ogness * This is the public function available to readers to see what the next
2008b6cf8b3fSJohn Ogness * newest sequence number available to readers will be.
2009b6cf8b3fSJohn Ogness *
2010b6cf8b3fSJohn Ogness * This provides readers a sequence number to jump to if all currently
2011b6cf8b3fSJohn Ogness * available records should be skipped.
2012b6cf8b3fSJohn Ogness *
2013b6cf8b3fSJohn Ogness * Context: Any context.
2014b6cf8b3fSJohn Ogness * Return: The sequence number of the next newest (not yet available) record
2015b6cf8b3fSJohn Ogness * for readers.
2016b6cf8b3fSJohn Ogness */
2017b6cf8b3fSJohn Ogness u64 prb_next_seq(struct printk_ringbuffer *rb)
2018b6cf8b3fSJohn Ogness {
2019*f244b4dcSPetr Mladek struct prb_desc_ring *desc_ring = &rb->desc_ring;
2020*f244b4dcSPetr Mladek enum desc_state d_state;
2021*f244b4dcSPetr Mladek unsigned long id;
2022*f244b4dcSPetr Mladek u64 seq;
2023b6cf8b3fSJohn Ogness
2024*f244b4dcSPetr Mladek /* Check if the cached @id still points to a valid @seq. */
2025*f244b4dcSPetr Mladek id = atomic_long_read(&desc_ring->last_finalized_id);
2026*f244b4dcSPetr Mladek d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
2027*f244b4dcSPetr Mladek
2028*f244b4dcSPetr Mladek if (d_state == desc_finalized || d_state == desc_reusable) {
2029*f244b4dcSPetr Mladek /*
2030*f244b4dcSPetr Mladek * Begin searching after the last finalized record.
2031*f244b4dcSPetr Mladek *
2032*f244b4dcSPetr Mladek * On 0, the search must begin at 0 because, due to hack#2
2033*f244b4dcSPetr Mladek * of the bootstrapping phase, it is not known whether a
2034*f244b4dcSPetr Mladek * record at index 0 exists.
2035*f244b4dcSPetr Mladek */
2036*f244b4dcSPetr Mladek if (seq != 0)
2037*f244b4dcSPetr Mladek seq++;
2038*f244b4dcSPetr Mladek } else {
2039*f244b4dcSPetr Mladek /*
2040*f244b4dcSPetr Mladek * The information about the last finalized sequence number
2041*f244b4dcSPetr Mladek * is gone. This should only happen when there is a flood of
2042*f244b4dcSPetr Mladek * new messages and the ringbuffer is rapidly recycled.
2043*f244b4dcSPetr Mladek * Give up and start from the beginning.
2044*f244b4dcSPetr Mladek */ 2045*f244b4dcSPetr Mladek seq = 0; 2046*f244b4dcSPetr Mladek } 2047*f244b4dcSPetr Mladek 2048*f244b4dcSPetr Mladek /* 2049*f244b4dcSPetr Mladek * The information about the last finalized @seq might be inaccurate. 2050*f244b4dcSPetr Mladek * Search forward to find the current one. 2051*f244b4dcSPetr Mladek */ 2052b6cf8b3fSJohn Ogness while (_prb_read_valid(rb, &seq, NULL, NULL)) 2053b6cf8b3fSJohn Ogness seq++; 2054b6cf8b3fSJohn Ogness 2055b6cf8b3fSJohn Ogness return seq; 2056b6cf8b3fSJohn Ogness } 2057b6cf8b3fSJohn Ogness 2058b6cf8b3fSJohn Ogness /** 2059b6cf8b3fSJohn Ogness * prb_init() - Initialize a ringbuffer to use provided external buffers. 2060b6cf8b3fSJohn Ogness * 2061b6cf8b3fSJohn Ogness * @rb: The ringbuffer to initialize. 2062b6cf8b3fSJohn Ogness * @text_buf: The data buffer for text data. 2063b6cf8b3fSJohn Ogness * @textbits: The size of @text_buf as a power-of-2 value. 2064b6cf8b3fSJohn Ogness * @descs: The descriptor buffer for ringbuffer records. 2065b6cf8b3fSJohn Ogness * @descbits: The count of @descs items as a power-of-2 value. 2066cfe2790bSJohn Ogness * @infos: The printk_info buffer for ringbuffer records. 2067b6cf8b3fSJohn Ogness * 2068b6cf8b3fSJohn Ogness * This is the public function available to writers to setup a ringbuffer 2069b6cf8b3fSJohn Ogness * during runtime using provided buffers. 2070b6cf8b3fSJohn Ogness * 2071b6cf8b3fSJohn Ogness * This must match the initialization of DEFINE_PRINTKRB(). 2072b6cf8b3fSJohn Ogness * 2073b6cf8b3fSJohn Ogness * Context: Any context. 2074b6cf8b3fSJohn Ogness */ 2075b6cf8b3fSJohn Ogness void prb_init(struct printk_ringbuffer *rb, 2076b6cf8b3fSJohn Ogness char *text_buf, unsigned int textbits, 2077cfe2790bSJohn Ogness struct prb_desc *descs, unsigned int descbits, 2078cfe2790bSJohn Ogness struct printk_info *infos) 2079b6cf8b3fSJohn Ogness { 2080b6cf8b3fSJohn Ogness memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0])); 2081cfe2790bSJohn Ogness memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0])); 2082b6cf8b3fSJohn Ogness 2083b6cf8b3fSJohn Ogness rb->desc_ring.count_bits = descbits; 2084b6cf8b3fSJohn Ogness rb->desc_ring.descs = descs; 2085cfe2790bSJohn Ogness rb->desc_ring.infos = infos; 2086b6cf8b3fSJohn Ogness atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); 2087b6cf8b3fSJohn Ogness atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); 2088*f244b4dcSPetr Mladek atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits)); 2089b6cf8b3fSJohn Ogness 2090b6cf8b3fSJohn Ogness rb->text_data_ring.size_bits = textbits; 2091b6cf8b3fSJohn Ogness rb->text_data_ring.data = text_buf; 2092b6cf8b3fSJohn Ogness atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits)); 2093b6cf8b3fSJohn Ogness atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits)); 2094b6cf8b3fSJohn Ogness 2095b6cf8b3fSJohn Ogness atomic_long_set(&rb->fail, 0); 2096b6cf8b3fSJohn Ogness 2097b6cf8b3fSJohn Ogness atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits)); 2098d397820fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS; 2099d397820fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS; 2100cfe2790bSJohn Ogness 2101cfe2790bSJohn Ogness infos[0].seq = -(u64)_DESCS_COUNT(descbits); 2102cfe2790bSJohn Ogness infos[_DESCS_COUNT(descbits) - 1].seq = 0; 2103b6cf8b3fSJohn Ogness } 2104b6cf8b3fSJohn Ogness 2105b6cf8b3fSJohn Ogness /** 2106b6cf8b3fSJohn Ogness * prb_record_text_space() - Query the full 
actual used ringbuffer space for 2107b6cf8b3fSJohn Ogness * the text data of a reserved entry. 2108b6cf8b3fSJohn Ogness * 2109b6cf8b3fSJohn Ogness * @e: The successfully reserved entry to query. 2110b6cf8b3fSJohn Ogness * 2111b6cf8b3fSJohn Ogness * This is the public function available to writers to see how much actual 2112b6cf8b3fSJohn Ogness * space is used in the ringbuffer to store the text data of the specified 2113b6cf8b3fSJohn Ogness * entry. 2114b6cf8b3fSJohn Ogness * 2115b6cf8b3fSJohn Ogness * This function is only valid if @e has been successfully reserved using 2116b6cf8b3fSJohn Ogness * prb_reserve(). 2117b6cf8b3fSJohn Ogness * 2118b6cf8b3fSJohn Ogness * Context: Any context. 2119b6cf8b3fSJohn Ogness * Return: The size in bytes used by the text data of the associated record. 2120b6cf8b3fSJohn Ogness */ 2121b6cf8b3fSJohn Ogness unsigned int prb_record_text_space(struct prb_reserved_entry *e) 2122b6cf8b3fSJohn Ogness { 2123b6cf8b3fSJohn Ogness return e->text_space; 2124b6cf8b3fSJohn Ogness } 2125