// SPDX-License-Identifier: GPL-2.0

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "printk_ringbuffer.h"

/**
 * DOC: printk_ringbuffer overview
 *
 * Data Structure
 * --------------
 * The printk_ringbuffer is made up of 3 internal ringbuffers:
 *
 *   desc_ring
 *     A ring of descriptors. A descriptor contains all record meta data
 *     (sequence number, timestamp, loglevel, etc.) as well as internal state
 *     information about the record and logical positions specifying where in
 *     the other ringbuffers the text and dictionary strings are located.
 *
 *   text_data_ring
 *     A ring of data blocks. A data block consists of an unsigned long
 *     integer (ID) that maps to a desc_ring index followed by the text
 *     string of the record.
 *
 *   dict_data_ring
 *     A ring of data blocks. A data block consists of an unsigned long
 *     integer (ID) that maps to a desc_ring index followed by the dictionary
 *     string of the record.
 *
 * The internal state information of a descriptor is the key element to allow
 * readers and writers to locklessly synchronize access to the data.
 *
 * Implementation
 * --------------
 *
 * Descriptor Ring
 * ~~~~~~~~~~~~~~~
 * The descriptor ring is an array of descriptors. A descriptor contains all
 * the meta data of a printk record as well as blk_lpos structs pointing to
 * associated text and dictionary data blocks (see "Data Rings" below). Each
 * descriptor is assigned an ID that maps directly to index values of the
 * descriptor array and has a state. The ID and the state are bitwise combined
 * into a single descriptor field named @state_var, allowing ID and state to
 * be synchronously and atomically updated.
 *
 * Descriptors have three states:
 *
 *   reserved
 *     A writer is modifying the record.
 *
 *   committed
 *     The record and all its data are complete and available for reading.
 *
 *   reusable
 *     The record exists, but its text and/or dictionary data may no longer
 *     be available.
 *
 * Querying the @state_var of a record requires providing the ID of the
 * descriptor to query.
 * This can yield a possible fourth (pseudo) state:
 *
 *   miss
 *     The descriptor being queried has an unexpected ID.
 *
 * The descriptor ring has a @tail_id that contains the ID of the oldest
 * descriptor and a @head_id that contains the ID of the newest descriptor.
 *
 * When a new descriptor should be created (and the ring is full), the tail
 * descriptor is invalidated by first transitioning to the reusable state and
 * then invalidating all tail data blocks up to and including the data blocks
 * associated with the tail descriptor (for text and dictionary rings). Then
 * @tail_id is advanced, followed by advancing @head_id. And finally the
 * @state_var of the new descriptor is initialized to the new ID and reserved
 * state.
 *
 * The @tail_id can only be advanced if the new @tail_id would be in the
 * committed or reusable queried state. This guarantees that a valid
 * sequence number for the tail is always available.
 *
 * Data Rings
 * ~~~~~~~~~~
 * The two data rings (text and dictionary) function identically. They exist
 * separately so that their buffer sizes can be individually set and they do
 * not affect one another.
 *
 * Data rings are byte arrays composed of data blocks. Data blocks are
 * referenced by blk_lpos structs that point to the logical position of the
 * beginning of a data block and the beginning of the next adjacent data
 * block. Logical positions are mapped directly to index values of the byte
 * array ringbuffer.
 *
 * Each data block consists of an ID followed by the writer data. The ID is
 * the identifier of a descriptor that is associated with the data block. A
 * given data block is considered valid if all of the following conditions
 * are met:
 *
 *   1) The descriptor associated with the data block is in the committed
 *      queried state.
 *
 *   2) The blk_lpos struct within the descriptor associated with the data
 *      block references back to the same data block.
 *
 *   3) The data block is within the head/tail logical position range.
 *
 * If the writer data of a data block would extend beyond the end of the
 * byte array, only the ID of the data block is stored at the logical
 * position and the full data block (ID and writer data) is stored at the
 * beginning of the byte array. The referencing blk_lpos will point to the
 * ID before the wrap and the next data block will be at the logical
 * position adjacent to the full data block after the wrap.
 *
 * Data rings have a @tail_lpos that points to the beginning of the oldest
 * data block and a @head_lpos that points to the logical position of the
 * next (not yet existing) data block.
 *
 * When a new data block should be created (and the ring is full), tail data
 * blocks will first be invalidated by putting their associated descriptors
 * into the reusable state and then pushing the @tail_lpos forward beyond
 * them. Then the @head_lpos is pushed forward and is associated with a new
 * descriptor. If a data block is not valid, the @tail_lpos cannot be
 * advanced beyond it.
 *
 * Usage
 * -----
 * Here are some simple examples demonstrating writers and readers. For the
 * examples a global ringbuffer (test_rb) is available (which is not the
 * actual ringbuffer used by printk)::
 *
 *	DEFINE_PRINTKRB(test_rb, 15, 5, 3);
 *
 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
 * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for
 * dictionary data.
 *
 * Sample writer code::
 *
 *	const char *dictstr = "dictionary text";
 *	const char *textstr = "message text";
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// specify how much to allocate
 *	prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1);
 *
 *	if (prb_reserve(&e, &test_rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 *
 *		// dictionary allocation may have failed
 *		if (r.dict_buf)
 *			snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr);
 *
 *		r.info->ts_nsec = local_clock();
 *
 *		prb_commit(&e);
 *	}
 *
 * Sample reader code::
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *	char dict_buf[32];
 *	u64 seq;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf),
 *			&dict_buf[0], sizeof(dict_buf));
 *
 *	prb_for_each_record(0, &test_rb, &seq, &r) {
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *
 *		if (info.text_len > r.text_buf_size) {
 *			pr_warn("record %llu text truncated\n", info.seq);
 *			text_buf[r.text_buf_size - 1] = 0;
 *		}
 *
 *		if (info.dict_len > r.dict_buf_size) {
 *			pr_warn("record %llu dict truncated\n", info.seq);
 *			dict_buf[r.dict_buf_size - 1] = 0;
 *		}
 *
 *		pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec,
 *			&text_buf[0], info.dict_len ? &dict_buf[0] : "");
 *	}
 *
 * Note that additional less convenient reader functions are available to
 * allow complex record access.
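 *
 * If only a single record is needed, an iteration-free lookup can be
 * sketched as follows. This example assumes the prb_read_valid() reader
 * interface declared in printk_ringbuffer.h and ignores dictionary data;
 * adjust it to the actual reader API if that differs::
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *	u64 seq = 5;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), NULL, 0);
 *
 *	// returns false if record 5 does not (yet) exist or is not readable
 *	if (prb_read_valid(&test_rb, seq, &r))
 *		pr_info("record %llu: %s\n", info.seq, &text_buf[0]);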
 *
 * ABA Issues
 * ~~~~~~~~~~
 * To help avoid ABA issues, descriptors are referenced by IDs (array index
 * values combined with tagged bits counting array wraps) and data blocks are
 * referenced by logical positions (array index values combined with tagged
 * bits counting array wraps). However, on 32-bit systems the number of
 * tagged bits is relatively small such that an ABA incident is (at least
 * theoretically) possible. For example, if 4 million maximally sized (1KiB)
 * printk messages were to occur in NMI context on a 32-bit system, the
 * interrupted context would not be able to recognize that the 32-bit integer
 * completely wrapped and thus represents a different data block than the one
 * the interrupted context expects.
 *
 * To help combat this possibility, additional state checking is performed
 * (such as using cmpxchg() even though set() would suffice). These extra
 * checks are commented as such and will hopefully catch any ABA issue that
 * a 32-bit system might experience.
 *
 * Memory Barriers
 * ~~~~~~~~~~~~~~~
 * Multiple memory barriers are used. To simplify proving correctness and
 * generating litmus tests, lines of code related to memory barriers
 * (loads, stores, and the associated memory barriers) are labeled::
 *
 *	LMM(function:letter)
 *
 * Comments reference the labels using only the "function:letter" part.
 *
 * The memory barrier pairs and their ordering are:
 *
 *   desc_reserve:D / desc_reserve:B
 *     push descriptor tail (id), then push descriptor head (id)
 *
 *   desc_reserve:D / data_push_tail:B
 *     push data tail (lpos), then set new descriptor reserved (state)
 *
 *   desc_reserve:D / desc_push_tail:C
 *     push descriptor tail (id), then set new descriptor reserved (state)
 *
 *   desc_reserve:D / prb_first_seq:C
 *     push descriptor tail (id), then set new descriptor reserved (state)
 *
 *   desc_reserve:F / desc_read:D
 *     set new descriptor id and reserved (state), then allow writer changes
 *
 *   data_alloc:A / desc_read:D
 *     set old descriptor reusable (state), then modify new data block area
 *
 *   data_alloc:A / data_push_tail:B
 *     push data tail (lpos), then modify new data block area
 *
 *   prb_commit:B / desc_read:B
 *     store writer changes, then set new descriptor committed (state)
 *
 *   data_push_tail:D / data_push_tail:A
 *     set descriptor reusable (state), then push data tail (lpos)
 *
 *   desc_push_tail:B / desc_reserve:D
 *     set descriptor reusable (state), then push descriptor tail (id)
 */

#define DATA_SIZE(data_ring)		_DATA_SIZE((data_ring)->size_bits)
#define DATA_SIZE_MASK(data_ring)	(DATA_SIZE(data_ring) - 1)

#define DESCS_COUNT(desc_ring)		_DESCS_COUNT((desc_ring)->count_bits)
#define DESCS_COUNT_MASK(desc_ring)	(DESCS_COUNT(desc_ring) - 1)

/* Determine the data array index from a logical position. */
#define DATA_INDEX(data_ring, lpos)	((lpos) & DATA_SIZE_MASK(data_ring))

/* Determine the desc array index from an ID or sequence number. */
#define DESC_INDEX(desc_ring, n)	((n) & DESCS_COUNT_MASK(desc_ring))

/* Determine how many times the data array has wrapped. */
#define DATA_WRAPS(data_ring, lpos)	((lpos) >> (data_ring)->size_bits)

/* Determine if a logical position refers to a data-less block. */
#define LPOS_DATALESS(lpos)		((lpos) & 1UL)

/* Get the logical position at index 0 of the current wrap. */
#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
	((lpos) & ~DATA_SIZE_MASK(data_ring))
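
/*
 * Worked example (illustrative values only): with size_bits = 5 the data
 * array holds 32 bytes, so for lpos = 70:
 *
 *	DATA_INDEX(data_ring, 70)                == 70 & 31  == 6
 *	DATA_WRAPS(data_ring, 70)                == 70 >> 5  == 2
 *	DATA_THIS_WRAP_START_LPOS(data_ring, 70) == 70 & ~31 == 64
 *
 * i.e. lpos 70 is byte 6 of the array during its third pass (wrap count 2).
 */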
/* Get the ID for the same index of the previous wrap as the given ID. */
#define DESC_ID_PREV_WRAP(desc_ring, id) \
	DESC_ID((id) - DESCS_COUNT(desc_ring))

/*
 * A data block: mapped directly to the beginning of the data block area
 * specified as a logical position within the data ring.
 *
 * @id:   the ID of the associated descriptor
 * @data: the writer data
 *
 * Note that the size of a data block is only known by its associated
 * descriptor.
 */
struct prb_data_block {
	unsigned long	id;
	char		data[0];
};

/*
 * Return the descriptor associated with @n. @n can be either a
 * descriptor ID or a sequence number.
 */
static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
{
	return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
}

static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
				       unsigned long begin_lpos)
{
	return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
}

/*
 * Increase the data size to account for data block meta data plus any
 * padding so that the adjacent data block is aligned on the ID size.
 */
static unsigned int to_blk_size(unsigned int size)
{
	struct prb_data_block *db = NULL;

	size += sizeof(*db);
	size = ALIGN(size, sizeof(db->id));
	return size;
}
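
/*
 * Worked example (illustrative, assuming a 64-bit system where
 * sizeof(db->id) == 8): to_blk_size(13) adds the 8-byte block ID and pads
 * the total up to the next multiple of the ID size:
 *
 *	13 + 8 == 21, ALIGN(21, 8) == 24
 *
 * so a 13-byte reservation consumes 24 bytes of the data ring.
 */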
338b6cf8b3fSJohn Ogness */ 339b6cf8b3fSJohn Ogness size = to_blk_size(size); 340b6cf8b3fSJohn Ogness if (size > DATA_SIZE(data_ring) - sizeof(db->id)) 341b6cf8b3fSJohn Ogness return false; 342b6cf8b3fSJohn Ogness 343b6cf8b3fSJohn Ogness return true; 344b6cf8b3fSJohn Ogness } 345b6cf8b3fSJohn Ogness 346b6cf8b3fSJohn Ogness /* The possible responses of a descriptor state-query. */ 347b6cf8b3fSJohn Ogness enum desc_state { 348b6cf8b3fSJohn Ogness desc_miss, /* ID mismatch */ 349b6cf8b3fSJohn Ogness desc_reserved, /* reserved, in use by writer */ 350b6cf8b3fSJohn Ogness desc_committed, /* committed, writer is done */ 351b6cf8b3fSJohn Ogness desc_reusable, /* free, not yet used by any writer */ 352b6cf8b3fSJohn Ogness }; 353b6cf8b3fSJohn Ogness 354b6cf8b3fSJohn Ogness /* Query the state of a descriptor. */ 355b6cf8b3fSJohn Ogness static enum desc_state get_desc_state(unsigned long id, 356b6cf8b3fSJohn Ogness unsigned long state_val) 357b6cf8b3fSJohn Ogness { 358b6cf8b3fSJohn Ogness if (id != DESC_ID(state_val)) 359b6cf8b3fSJohn Ogness return desc_miss; 360b6cf8b3fSJohn Ogness 361b6cf8b3fSJohn Ogness if (state_val & DESC_REUSE_MASK) 362b6cf8b3fSJohn Ogness return desc_reusable; 363b6cf8b3fSJohn Ogness 364b6cf8b3fSJohn Ogness if (state_val & DESC_COMMITTED_MASK) 365b6cf8b3fSJohn Ogness return desc_committed; 366b6cf8b3fSJohn Ogness 367b6cf8b3fSJohn Ogness return desc_reserved; 368b6cf8b3fSJohn Ogness } 369b6cf8b3fSJohn Ogness 370b6cf8b3fSJohn Ogness /* 371ce003d67SJohn Ogness * Get a copy of a specified descriptor and return its queried state. If the 372ce003d67SJohn Ogness * descriptor is in an inconsistent state (miss or reserved), the caller can 373ce003d67SJohn Ogness * only expect the descriptor's @state_var field to be valid. 374b6cf8b3fSJohn Ogness */ 375b6cf8b3fSJohn Ogness static enum desc_state desc_read(struct prb_desc_ring *desc_ring, 376b6cf8b3fSJohn Ogness unsigned long id, struct prb_desc *desc_out) 377b6cf8b3fSJohn Ogness { 378b6cf8b3fSJohn Ogness struct prb_desc *desc = to_desc(desc_ring, id); 379b6cf8b3fSJohn Ogness atomic_long_t *state_var = &desc->state_var; 380b6cf8b3fSJohn Ogness enum desc_state d_state; 381b6cf8b3fSJohn Ogness unsigned long state_val; 382b6cf8b3fSJohn Ogness 383b6cf8b3fSJohn Ogness /* Check the descriptor state. */ 384b6cf8b3fSJohn Ogness state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */ 385b6cf8b3fSJohn Ogness d_state = get_desc_state(id, state_val); 386ce003d67SJohn Ogness if (d_state == desc_miss || d_state == desc_reserved) { 387ce003d67SJohn Ogness /* 388ce003d67SJohn Ogness * The descriptor is in an inconsistent state. Set at least 389ce003d67SJohn Ogness * @state_var so that the caller can see the details of 390ce003d67SJohn Ogness * the inconsistent state. 391ce003d67SJohn Ogness */ 392ce003d67SJohn Ogness goto out; 393ce003d67SJohn Ogness } 394b6cf8b3fSJohn Ogness 395b6cf8b3fSJohn Ogness /* 396b6cf8b3fSJohn Ogness * Guarantee the state is loaded before copying the descriptor 397b6cf8b3fSJohn Ogness * content. This avoids copying obsolete descriptor content that might 398b6cf8b3fSJohn Ogness * not apply to the descriptor state. This pairs with prb_commit:B. 399b6cf8b3fSJohn Ogness * 400b6cf8b3fSJohn Ogness * Memory barrier involvement: 401b6cf8b3fSJohn Ogness * 402b6cf8b3fSJohn Ogness * If desc_read:A reads from prb_commit:B, then desc_read:C reads 403b6cf8b3fSJohn Ogness * from prb_commit:A. 

/*
 * Get a copy of a specified descriptor and return its queried state. If the
 * descriptor is in an inconsistent state (miss or reserved), the caller can
 * only expect the descriptor's @state_var field to be valid.
 */
static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
				 unsigned long id, struct prb_desc *desc_out)
{
	struct prb_desc *desc = to_desc(desc_ring, id);
	atomic_long_t *state_var = &desc->state_var;
	enum desc_state d_state;
	unsigned long state_val;

	/* Check the descriptor state. */
	state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
	d_state = get_desc_state(id, state_val);
	if (d_state == desc_miss || d_state == desc_reserved) {
		/*
		 * The descriptor is in an inconsistent state. Set at least
		 * @state_var so that the caller can see the details of
		 * the inconsistent state.
		 */
		goto out;
	}

	/*
	 * Guarantee the state is loaded before copying the descriptor
	 * content. This avoids copying obsolete descriptor content that might
	 * not apply to the descriptor state. This pairs with prb_commit:B.
	 *
	 * Memory barrier involvement:
	 *
	 * If desc_read:A reads from prb_commit:B, then desc_read:C reads
	 * from prb_commit:A.
	 *
	 * Relies on:
	 *
	 * WMB from prb_commit:A to prb_commit:B
	 *    matching
	 * RMB from desc_read:A to desc_read:C
	 */
	smp_rmb(); /* LMM(desc_read:B) */

	/*
	 * Copy the descriptor data. The data is not valid until the
	 * state has been re-checked. A memcpy() for all of @desc
	 * cannot be used because of the atomic_t @state_var field.
	 */
	memcpy(&desc_out->info, &desc->info, sizeof(desc_out->info)); /* LMM(desc_read:C) */
	memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
	       sizeof(desc_out->text_blk_lpos)); /* also part of desc_read:C */
	memcpy(&desc_out->dict_blk_lpos, &desc->dict_blk_lpos,
	       sizeof(desc_out->dict_blk_lpos)); /* also part of desc_read:C */

	/*
	 * 1. Guarantee the descriptor content is loaded before re-checking
	 *    the state. This avoids reading an obsolete descriptor state
	 *    that may not apply to the copied content. This pairs with
	 *    desc_reserve:F.
	 *
	 *    Memory barrier involvement:
	 *
	 *    If desc_read:C reads from desc_reserve:G, then desc_read:E
	 *    reads from desc_reserve:F.
	 *
	 *    Relies on:
	 *
	 *    WMB from desc_reserve:F to desc_reserve:G
	 *       matching
	 *    RMB from desc_read:C to desc_read:E
	 *
	 * 2. Guarantee the record data is loaded before re-checking the
	 *    state. This avoids reading an obsolete descriptor state that may
	 *    not apply to the copied data. This pairs with data_alloc:A.
	 *
	 *    Memory barrier involvement:
	 *
	 *    If copy_data:A reads from data_alloc:B, then desc_read:E
	 *    reads from desc_make_reusable:A.
	 *
	 *    Relies on:
	 *
	 *    MB from desc_make_reusable:A to data_alloc:B
	 *       matching
	 *    RMB from desc_read:C to desc_read:E
	 *
	 *    Note: desc_make_reusable:A and data_alloc:B can be different
	 *          CPUs. However, the data_alloc:B CPU (which performs the
	 *          full memory barrier) must have previously seen
	 *          desc_make_reusable:A.
	 */
	smp_rmb(); /* LMM(desc_read:D) */

	/*
	 * The data has been copied. Return the current descriptor state,
	 * which may have changed since the load above.
	 */
	state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
	d_state = get_desc_state(id, state_val);
out:
	atomic_long_set(&desc_out->state_var, state_val);
	return d_state;
}

/*
 * Take a specified descriptor out of the committed state by attempting
 * the transition from committed to reusable. Either this context or some
 * other context will have been successful.
 */
static void desc_make_reusable(struct prb_desc_ring *desc_ring,
			       unsigned long id)
{
	unsigned long val_committed = id | DESC_COMMITTED_MASK;
	unsigned long val_reusable = val_committed | DESC_REUSE_MASK;
	struct prb_desc *desc = to_desc(desc_ring, id);
	atomic_long_t *state_var = &desc->state_var;

	atomic_long_cmpxchg_relaxed(state_var, val_committed,
				    val_reusable); /* LMM(desc_make_reusable:A) */
}
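
/*
 * Illustration (hypothetical value): for id == 7 the cmpxchg above attempts
 * the transition
 *
 *	from: 7 | DESC_COMMITTED_MASK
 *	to:   7 | DESC_COMMITTED_MASK | DESC_REUSE_MASK
 *
 * If @state_var does not hold the expected committed value (for example
 * because another context already performed the transition), the cmpxchg
 * simply fails and nothing is changed here.
 */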

/*
 * Given a data ring (text or dict), put the associated descriptor of each
 * data block from @lpos_begin until @lpos_end into the reusable state.
 *
 * If there is any problem making the associated descriptor reusable, either
 * the descriptor has not yet been committed or another writer context has
 * already pushed the tail lpos past the problematic data block. Regardless,
 * on error the caller can re-load the tail lpos to determine the situation.
 */
static bool data_make_reusable(struct printk_ringbuffer *rb,
			       struct prb_data_ring *data_ring,
			       unsigned long lpos_begin,
			       unsigned long lpos_end,
			       unsigned long *lpos_out)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct prb_data_blk_lpos *blk_lpos;
	struct prb_data_block *blk;
	enum desc_state d_state;
	struct prb_desc desc;
	unsigned long id;

	/*
	 * Using the provided @data_ring, point @blk_lpos to the correct
	 * blk_lpos within the local copy of the descriptor.
	 */
	if (data_ring == &rb->text_data_ring)
		blk_lpos = &desc.text_blk_lpos;
	else
		blk_lpos = &desc.dict_blk_lpos;

	/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
	while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
		blk = to_block(data_ring, lpos_begin);

		/*
		 * Load the block ID from the data block. This is a data race
		 * against a writer that may have newly reserved this data
		 * area. If the loaded value matches a valid descriptor ID,
		 * the blk_lpos of that descriptor will be checked to make
		 * sure it points back to this data block. If the check fails,
		 * the data area has been recycled by another writer.
		 */
		id = blk->id; /* LMM(data_make_reusable:A) */

		d_state = desc_read(desc_ring, id, &desc); /* LMM(data_make_reusable:B) */

		switch (d_state) {
		case desc_miss:
			return false;
		case desc_reserved:
			return false;
		case desc_committed:
			/*
			 * This data block is invalid if the descriptor
			 * does not point back to it.
			 */
			if (blk_lpos->begin != lpos_begin)
				return false;
			desc_make_reusable(desc_ring, id);
			break;
		case desc_reusable:
			/*
			 * This data block is invalid if the descriptor
			 * does not point back to it.
			 */
			if (blk_lpos->begin != lpos_begin)
				return false;
			break;
		}

		/* Advance @lpos_begin to the next data block. */
		lpos_begin = blk_lpos->next;
	}

	*lpos_out = lpos_begin;
	return true;
}

/*
 * Advance the data ring tail to at least @lpos. This function puts
 * descriptors into the reusable state if the tail is pushed beyond
 * their associated data block.
 */
static bool data_push_tail(struct printk_ringbuffer *rb,
			   struct prb_data_ring *data_ring,
			   unsigned long lpos)
{
	unsigned long tail_lpos_new;
	unsigned long tail_lpos;
	unsigned long next_lpos;

	/* If @lpos is from a data-less block, there is nothing to do. */
	if (LPOS_DATALESS(lpos))
		return true;

	/*
	 * Any descriptor states that have transitioned to reusable due to the
	 * data tail being pushed to this loaded value will be visible to this
	 * CPU. This pairs with data_push_tail:D.
	 *
	 * Memory barrier involvement:
	 *
	 * If data_push_tail:A reads from data_push_tail:D, then this CPU can
	 * see desc_make_reusable:A.
	 *
	 * Relies on:
	 *
	 * MB from desc_make_reusable:A to data_push_tail:D
	 *    matches
	 * READFROM from data_push_tail:D to data_push_tail:A
	 *    thus
	 * READFROM from desc_make_reusable:A to this CPU
	 */
	tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */

	/*
	 * Loop until the tail lpos is at or beyond @lpos. This condition
	 * may already be satisfied, resulting in no full memory barrier
	 * from data_push_tail:D being performed. However, since this CPU
	 * sees the new tail lpos, any descriptor states that transitioned to
	 * the reusable state must already be visible.
	 */
	while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
		/*
		 * Make all descriptors reusable that are associated with
		 * data blocks before @lpos.
		 */
		if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
					&next_lpos)) {
			/*
			 * 1. Guarantee the block ID load in
			 *    data_make_reusable() is performed before
			 *    reloading the tail lpos. The failed
			 *    data_make_reusable() may be due to a newly
			 *    recycled data area causing the tail lpos to
			 *    have been previously pushed. This pairs with
			 *    data_alloc:A.
			 *
			 *    Memory barrier involvement:
			 *
			 *    If data_make_reusable:A reads from data_alloc:B,
			 *    then data_push_tail:C reads from
			 *    data_push_tail:D.
			 *
			 *    Relies on:
			 *
			 *    MB from data_push_tail:D to data_alloc:B
			 *       matching
			 *    RMB from data_make_reusable:A to
			 *    data_push_tail:C
			 *
			 *    Note: data_push_tail:D and data_alloc:B can be
			 *          different CPUs. However, the data_alloc:B
			 *          CPU (which performs the full memory
			 *          barrier) must have previously seen
			 *          data_push_tail:D.
			 *
			 * 2. Guarantee the descriptor state load in
			 *    data_make_reusable() is performed before
			 *    reloading the tail lpos. The failed
			 *    data_make_reusable() may be due to a newly
			 *    recycled descriptor causing the tail lpos to
			 *    have been previously pushed. This pairs with
			 *    desc_reserve:D.
			 *
			 *    Memory barrier involvement:
			 *
			 *    If data_make_reusable:B reads from
			 *    desc_reserve:F, then data_push_tail:C reads
			 *    from data_push_tail:D.
			 *
			 *    Relies on:
			 *
			 *    MB from data_push_tail:D to desc_reserve:F
			 *       matching
			 *    RMB from data_make_reusable:B to
			 *    data_push_tail:C
			 *
			 *    Note: data_push_tail:D and desc_reserve:F can
			 *          be different CPUs. However, the
			 *          desc_reserve:F CPU (which performs the
			 *          full memory barrier) must have previously
			 *          seen data_push_tail:D.
			 */
			smp_rmb(); /* LMM(data_push_tail:B) */

			tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
							); /* LMM(data_push_tail:C) */
			if (tail_lpos_new == tail_lpos)
				return false;

			/* Another CPU pushed the tail. Try again. */
			tail_lpos = tail_lpos_new;
			continue;
		}

		/*
		 * Guarantee any descriptor states that have transitioned to
		 * reusable are stored before pushing the tail lpos. A full
		 * memory barrier is needed since other CPUs may have made
		 * the descriptor states reusable. This pairs with
		 * data_push_tail:A.
		 */
		if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
					    next_lpos)) { /* LMM(data_push_tail:D) */
			break;
		}
	}

	return true;
}

/*
 * Advance the desc ring tail. This function advances the tail by one
 * descriptor, thus invalidating the oldest descriptor. Before advancing
 * the tail, the tail descriptor is made reusable and all data blocks up to
 * and including the descriptor's data block are invalidated (i.e. the data
 * ring tail is pushed past the data block of the descriptor being made
 * reusable).
 */
static bool desc_push_tail(struct printk_ringbuffer *rb,
			   unsigned long tail_id)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	enum desc_state d_state;
	struct prb_desc desc;

	d_state = desc_read(desc_ring, tail_id, &desc);

	switch (d_state) {
	case desc_miss:
		/*
		 * If the ID is exactly 1 wrap behind the expected, it is
		 * in the process of being reserved by another writer and
		 * must be considered reserved.
		 */
		if (DESC_ID(atomic_long_read(&desc.state_var)) ==
		    DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
			return false;
		}

		/*
		 * The ID has changed. Another writer must have pushed the
		 * tail and recycled the descriptor already. Success is
		 * returned because the caller is only interested in the
		 * specified tail being pushed, which it was.
		 */
		return true;
	case desc_reserved:
		return false;
	case desc_committed:
		desc_make_reusable(desc_ring, tail_id);
		break;
	case desc_reusable:
		break;
	}

	/*
	 * Data blocks must be invalidated before their associated
	 * descriptor can be made available for recycling. Invalidating
	 * them later is not possible because there is no way to trust
	 * data blocks once their associated descriptor is gone.
	 */

	if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
		return false;
	if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next))
		return false;

	/*
	 * Check the next descriptor after @tail_id before pushing the tail
	 * to it because the tail must always be in a committed or reusable
	 * state. The implementation of prb_first_seq() relies on this.
	 *
	 * A successful read implies that the next descriptor is less than or
	 * equal to @head_id so there is no risk of pushing the tail past the
	 * head.
	 */
	d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */

	if (d_state == desc_committed || d_state == desc_reusable) {
		/*
		 * Guarantee any descriptor states that have transitioned to
		 * reusable are stored before pushing the tail ID. This allows
		 * verifying the recycled descriptor state. A full memory
		 * barrier is needed since other CPUs may have made the
		 * descriptor states reusable. This pairs with desc_reserve:D.
		 */
		atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
				    DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
	} else {
		/*
		 * Guarantee the last state load from desc_read() is before
		 * reloading @tail_id in order to see a new tail ID in the
		 * case that the descriptor has been recycled. This pairs
		 * with desc_reserve:D.
		 *
		 * Memory barrier involvement:
		 *
		 * If desc_push_tail:A reads from desc_reserve:F, then
		 * desc_push_tail:D reads from desc_push_tail:B.
		 *
		 * Relies on:
		 *
		 * MB from desc_push_tail:B to desc_reserve:F
		 *    matching
		 * RMB from desc_push_tail:A to desc_push_tail:D
		 *
		 * Note: desc_push_tail:B and desc_reserve:F can be different
		 *       CPUs. However, the desc_reserve:F CPU (which performs
		 *       the full memory barrier) must have previously seen
		 *       desc_push_tail:B.
		 */
		smp_rmb(); /* LMM(desc_push_tail:C) */

		/*
		 * Re-check the tail ID. The descriptor following @tail_id is
		 * not in an allowed tail state. But if the tail has since
		 * been moved by another CPU, then it does not matter.
		 */
		if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
			return false;
	}

	return true;
}

/* Reserve a new descriptor, invalidating the oldest if necessary. */
static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	unsigned long prev_state_val;
	unsigned long id_prev_wrap;
	struct prb_desc *desc;
	unsigned long head_id;
	unsigned long id;

	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */

	do {
		desc = to_desc(desc_ring, head_id);

		id = DESC_ID(head_id + 1);
		id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);

		/*
		 * Guarantee the head ID is read before reading the tail ID.
		 * Since the tail ID is updated before the head ID, this
		 * guarantees that @id_prev_wrap is never ahead of the tail
		 * ID. This pairs with desc_reserve:D.
		 *
		 * Memory barrier involvement:
		 *
		 * If desc_reserve:A reads from desc_reserve:D, then
		 * desc_reserve:C reads from desc_push_tail:B.
		 *
		 * Relies on:
		 *
		 * MB from desc_push_tail:B to desc_reserve:D
		 *    matching
		 * RMB from desc_reserve:A to desc_reserve:C
		 *
		 * Note: desc_push_tail:B and desc_reserve:D can be different
		 *       CPUs.
		 *       However, the desc_reserve:D CPU (which performs
		 *       the full memory barrier) must have previously seen
		 *       desc_push_tail:B.
		 */
		smp_rmb(); /* LMM(desc_reserve:B) */

		if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
						    )) { /* LMM(desc_reserve:C) */
			/*
			 * Make space for the new descriptor by
			 * advancing the tail.
			 */
			if (!desc_push_tail(rb, id_prev_wrap))
				return false;
		}

		/*
		 * 1. Guarantee the tail ID is read before validating the
		 *    recycled descriptor state. A read memory barrier is
		 *    sufficient for this. This pairs with desc_push_tail:B.
		 *
		 *    Memory barrier involvement:
		 *
		 *    If desc_reserve:C reads from desc_push_tail:B, then
		 *    desc_reserve:E reads from desc_make_reusable:A.
		 *
		 *    Relies on:
		 *
		 *    MB from desc_make_reusable:A to desc_push_tail:B
		 *       matching
		 *    RMB from desc_reserve:C to desc_reserve:E
		 *
		 *    Note: desc_make_reusable:A and desc_push_tail:B can be
		 *          different CPUs. However, the desc_push_tail:B CPU
		 *          (which performs the full memory barrier) must have
		 *          previously seen desc_make_reusable:A.
		 *
		 * 2. Guarantee the tail ID is stored before storing the head
		 *    ID. This pairs with desc_reserve:B.
		 *
		 * 3. Guarantee any data ring tail changes are stored before
		 *    recycling the descriptor. Data ring tail changes can
		 *    happen via desc_push_tail()->data_push_tail(). A full
		 *    memory barrier is needed since another CPU may have
		 *    pushed the data ring tails. This pairs with
		 *    data_push_tail:B.
		 *
		 * 4. Guarantee a new tail ID is stored before recycling the
		 *    descriptor. A full memory barrier is needed since
		 *    another CPU may have pushed the tail ID. This pairs
		 *    with desc_push_tail:C and this also pairs with
		 *    prb_first_seq:C.
		 */
	} while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
					  id)); /* LMM(desc_reserve:D) */

	desc = to_desc(desc_ring, id);

	/*
	 * If the descriptor has been recycled, verify the old state val.
	 * See "ABA Issues" about why this verification is performed.
	 */
	prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
	if (prev_state_val &&
	    prev_state_val != (id_prev_wrap | DESC_COMMITTED_MASK | DESC_REUSE_MASK)) {
		WARN_ON_ONCE(1);
		return false;
	}

	/*
	 * Assign the descriptor a new ID and set its state to reserved.
	 * See "ABA Issues" about why cmpxchg() instead of set() is used.
	 *
	 * Guarantee the new descriptor ID and state are stored before making
	 * any other changes. A write memory barrier is sufficient for this.
	 * This pairs with desc_read:D.
	 */
	if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
				     id | 0)) { /* LMM(desc_reserve:F) */
		WARN_ON_ONCE(1);
		return false;
	}

	/* Now data in @desc can be modified: LMM(desc_reserve:G) */

	*id_out = id;
	return true;
}

/* Determine the end of a data block. */
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
				   unsigned long lpos, unsigned int size)
{
	unsigned long begin_lpos;
	unsigned long next_lpos;

	begin_lpos = lpos;
	next_lpos = lpos + size;

	/* First check if the data block does not wrap. */
	if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
		return next_lpos;

	/* Wrapping data blocks store their data at the beginning. */
	return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
}
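
/*
 * Worked example (illustrative values only): with a 32-byte data ring
 * (size_bits = 5), a block of size 16 beginning at lpos 88 would end at
 * lpos 104, which is in the next wrap (88 >> 5 == 2, but 104 >> 5 == 3).
 * The block data is therefore placed at the start of the next wrap:
 *
 *	get_next_lpos(data_ring, 88, 16)
 *		== DATA_THIS_WRAP_START_LPOS(data_ring, 104) + 16
 *		== 96 + 16 == 112
 *
 * The block ID remains at lpos 88 (see data_alloc() below), while the full
 * block occupies array indexes 0..15 of the new wrap.
 */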

/*
 * Allocate a new data block, invalidating the oldest data block(s)
 * if necessary. This function also associates the data block with
 * a specified descriptor.
 */
static char *data_alloc(struct printk_ringbuffer *rb,
			struct prb_data_ring *data_ring, unsigned int size,
			struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
	struct prb_data_block *blk;
	unsigned long begin_lpos;
	unsigned long next_lpos;

	if (size == 0) {
		/* Specify a data-less block. */
		blk_lpos->begin = NO_LPOS;
		blk_lpos->next = NO_LPOS;
		return NULL;
	}

	size = to_blk_size(size);

	begin_lpos = atomic_long_read(&data_ring->head_lpos);

	do {
		next_lpos = get_next_lpos(data_ring, begin_lpos, size);

		if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
			/* Failed to allocate, specify a data-less block. */
			blk_lpos->begin = FAILED_LPOS;
			blk_lpos->next = FAILED_LPOS;
			return NULL;
		}

		/*
		 * 1. Guarantee any descriptor states that have transitioned
		 *    to reusable are stored before modifying the newly
		 *    allocated data area. A full memory barrier is needed
		 *    since other CPUs may have made the descriptor states
		 *    reusable. See data_push_tail:A about why the reusable
		 *    states are visible. This pairs with desc_read:D.
		 *
		 * 2. Guarantee any updated tail lpos is stored before
		 *    modifying the newly allocated data area. Another CPU
		 *    may be in data_make_reusable(), reading a block ID
		 *    from this area. data_make_reusable() can handle reading
		 *    a garbage block ID value, but then it must be able to
		 *    load a new tail lpos. A full memory barrier is needed
		 *    since other CPUs may have updated the tail lpos. This
		 *    pairs with data_push_tail:B.
		 */
	} while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
					  next_lpos)); /* LMM(data_alloc:A) */

	blk = to_block(data_ring, begin_lpos);
	blk->id = id; /* LMM(data_alloc:B) */

	if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
		/* Wrapping data blocks store their data at the beginning. */
		blk = to_block(data_ring, 0);

		/*
		 * Store the ID on the wrapped block for consistency.
		 * The printk_ringbuffer does not actually use it.
		 */
		blk->id = id;
	}

	blk_lpos->begin = begin_lpos;
	blk_lpos->next = next_lpos;

	return &blk->data[0];
}

/* Return the number of bytes used by a data block. */
static unsigned int space_used(struct prb_data_ring *data_ring,
			       struct prb_data_blk_lpos *blk_lpos)
{
	/* Data-less blocks take no space. */
*/ 1041d397820fSJohn Ogness if (LPOS_DATALESS(blk_lpos->begin)) 1042d397820fSJohn Ogness return 0; 1043d397820fSJohn Ogness 1044b6cf8b3fSJohn Ogness if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { 1045b6cf8b3fSJohn Ogness /* Data block does not wrap. */ 1046b6cf8b3fSJohn Ogness return (DATA_INDEX(data_ring, blk_lpos->next) - 1047b6cf8b3fSJohn Ogness DATA_INDEX(data_ring, blk_lpos->begin)); 1048b6cf8b3fSJohn Ogness } 1049b6cf8b3fSJohn Ogness 1050b6cf8b3fSJohn Ogness /* 1051b6cf8b3fSJohn Ogness * For wrapping data blocks, the trailing (wasted) space is 1052b6cf8b3fSJohn Ogness * also counted. 1053b6cf8b3fSJohn Ogness */ 1054b6cf8b3fSJohn Ogness return (DATA_INDEX(data_ring, blk_lpos->next) + 1055b6cf8b3fSJohn Ogness DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin)); 1056b6cf8b3fSJohn Ogness } 1057b6cf8b3fSJohn Ogness 10582a7f87edSJohn Ogness /* 10592a7f87edSJohn Ogness * Given @blk_lpos, return a pointer to the writer data from the data block 10602a7f87edSJohn Ogness * and calculate the size of the data part. A NULL pointer is returned if 10612a7f87edSJohn Ogness * @blk_lpos specifies values that could never be legal. 10622a7f87edSJohn Ogness * 10632a7f87edSJohn Ogness * This function (used by readers) performs strict validation on the lpos 10642a7f87edSJohn Ogness * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 10652a7f87edSJohn Ogness * triggered if an internal error is detected. 10662a7f87edSJohn Ogness */ 10672a7f87edSJohn Ogness static const char *get_data(struct prb_data_ring *data_ring, 10682a7f87edSJohn Ogness struct prb_data_blk_lpos *blk_lpos, 10692a7f87edSJohn Ogness unsigned int *data_size) 10702a7f87edSJohn Ogness { 10712a7f87edSJohn Ogness struct prb_data_block *db; 10722a7f87edSJohn Ogness 10732a7f87edSJohn Ogness /* Data-less data block description. */ 10742a7f87edSJohn Ogness if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) { 10752a7f87edSJohn Ogness if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) { 10762a7f87edSJohn Ogness *data_size = 0; 10772a7f87edSJohn Ogness return ""; 10782a7f87edSJohn Ogness } 10792a7f87edSJohn Ogness return NULL; 10802a7f87edSJohn Ogness } 10812a7f87edSJohn Ogness 10822a7f87edSJohn Ogness /* Regular data block: @begin less than @next and in same wrap. */ 10832a7f87edSJohn Ogness if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && 10842a7f87edSJohn Ogness blk_lpos->begin < blk_lpos->next) { 10852a7f87edSJohn Ogness db = to_block(data_ring, blk_lpos->begin); 10862a7f87edSJohn Ogness *data_size = blk_lpos->next - blk_lpos->begin; 10872a7f87edSJohn Ogness 10882a7f87edSJohn Ogness /* Wrapping data block: @begin is one wrap behind @next. */ 10892a7f87edSJohn Ogness } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == 10902a7f87edSJohn Ogness DATA_WRAPS(data_ring, blk_lpos->next)) { 10912a7f87edSJohn Ogness db = to_block(data_ring, 0); 10922a7f87edSJohn Ogness *data_size = DATA_INDEX(data_ring, blk_lpos->next); 10932a7f87edSJohn Ogness 10942a7f87edSJohn Ogness /* Illegal block description. */ 10952a7f87edSJohn Ogness } else { 10962a7f87edSJohn Ogness WARN_ON_ONCE(1); 10972a7f87edSJohn Ogness return NULL; 10982a7f87edSJohn Ogness } 10992a7f87edSJohn Ogness 11002a7f87edSJohn Ogness /* A valid data block will always be aligned to the ID size. 
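 * data_alloc() rounds every requested size up via to_blk_size() before
 * advancing @head_lpos, so lpos values produced by writers satisfy this;
 * a misaligned value indicates a writer bug.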
*/ 11012a7f87edSJohn Ogness if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) || 11022a7f87edSJohn Ogness WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) { 11032a7f87edSJohn Ogness return NULL; 11042a7f87edSJohn Ogness } 11052a7f87edSJohn Ogness 11062a7f87edSJohn Ogness /* A valid data block will always have at least an ID. */ 11072a7f87edSJohn Ogness if (WARN_ON_ONCE(*data_size < sizeof(db->id))) 11082a7f87edSJohn Ogness return NULL; 11092a7f87edSJohn Ogness 11102a7f87edSJohn Ogness /* Subtract block ID space from size to reflect data size. */ 11112a7f87edSJohn Ogness *data_size -= sizeof(db->id); 11122a7f87edSJohn Ogness 11132a7f87edSJohn Ogness return &db->data[0]; 11142a7f87edSJohn Ogness } 11152a7f87edSJohn Ogness 1116b6cf8b3fSJohn Ogness /** 1117b6cf8b3fSJohn Ogness * prb_reserve() - Reserve space in the ringbuffer. 1118b6cf8b3fSJohn Ogness * 1119b6cf8b3fSJohn Ogness * @e: The entry structure to setup. 1120b6cf8b3fSJohn Ogness * @rb: The ringbuffer to reserve data in. 1121b6cf8b3fSJohn Ogness * @r: The record structure to allocate buffers for. 1122b6cf8b3fSJohn Ogness * 1123b6cf8b3fSJohn Ogness * This is the public function available to writers to reserve data. 1124b6cf8b3fSJohn Ogness * 1125b6cf8b3fSJohn Ogness * The writer specifies the text and dict sizes to reserve by setting the 1126b6cf8b3fSJohn Ogness * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries 1127b6cf8b3fSJohn Ogness * are optional, so @dict_buf_size is allowed to be 0. To ensure proper 1128b6cf8b3fSJohn Ogness * initialization of @r, prb_rec_init_wr() should be used. 1129b6cf8b3fSJohn Ogness * 1130b6cf8b3fSJohn Ogness * Context: Any context. Disables local interrupts on success. 1131b6cf8b3fSJohn Ogness * Return: true if at least text data could be allocated, otherwise false. 1132b6cf8b3fSJohn Ogness * 1133b6cf8b3fSJohn Ogness * On success, the fields @info, @text_buf, @dict_buf of @r will be set by 1134b6cf8b3fSJohn Ogness * this function and should be filled in by the writer before committing. Also 1135b6cf8b3fSJohn Ogness * on success, prb_record_text_space() can be used on @e to query the actual 1136b6cf8b3fSJohn Ogness * space used for the text data block. 1137b6cf8b3fSJohn Ogness * 1138b6cf8b3fSJohn Ogness * If the function fails to reserve dictionary space (but all else succeeded), 1139b6cf8b3fSJohn Ogness * it will still report success. In that case @dict_buf is set to NULL and 1140b6cf8b3fSJohn Ogness * @dict_buf_size is set to 0. Writers must check this before writing to 1141b6cf8b3fSJohn Ogness * dictionary space. 1142b6cf8b3fSJohn Ogness * 1143b6cf8b3fSJohn Ogness * @info->text_len and @info->dict_len will already be set to @text_buf_size 1144b6cf8b3fSJohn Ogness * and @dict_buf_size, respectively. If dictionary space reservation fails, 1145b6cf8b3fSJohn Ogness * @info->dict_len is set to 0. 
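 *
 * A minimal writer sketch (illustrative only; @test_rb, @text_len,
 * @dict_len, @text and @dict are assumptions, not part of this file):
 *
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// test_rb: a ringbuffer assumed to be defined elsewhere
 *	prb_rec_init_wr(&r, text_len, dict_len);
 *
 *	if (prb_reserve(&e, &test_rb, &r)) {
 *		snprintf(&r.text_buf[0], r.text_buf_size, "%s", text);
 *
 *		// dictionary allocation may have failed
 *		if (r.dict_buf)
 *			snprintf(&r.dict_buf[0], r.dict_buf_size, "%s", dict);
 *
 *		r.info->ts_nsec = local_clock();
 *
 *		prb_commit(&e);
 *	}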
1146b6cf8b3fSJohn Ogness */ 1147b6cf8b3fSJohn Ogness bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, 1148b6cf8b3fSJohn Ogness struct printk_record *r) 1149b6cf8b3fSJohn Ogness { 1150b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1151b6cf8b3fSJohn Ogness struct prb_desc *d; 1152b6cf8b3fSJohn Ogness unsigned long id; 1153b6cf8b3fSJohn Ogness 1154b6cf8b3fSJohn Ogness if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) 1155b6cf8b3fSJohn Ogness goto fail; 1156b6cf8b3fSJohn Ogness 1157b6cf8b3fSJohn Ogness if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size)) 1158b6cf8b3fSJohn Ogness goto fail; 1159b6cf8b3fSJohn Ogness 1160b6cf8b3fSJohn Ogness /* 1161b6cf8b3fSJohn Ogness * Descriptors in the reserved state act as blockers to all further 1162b6cf8b3fSJohn Ogness * reservations once the desc_ring has fully wrapped. Disable 1163b6cf8b3fSJohn Ogness * interrupts during the reserve/commit window in order to minimize 1164b6cf8b3fSJohn Ogness * the likelihood of this happening. 1165b6cf8b3fSJohn Ogness */ 1166b6cf8b3fSJohn Ogness local_irq_save(e->irqflags); 1167b6cf8b3fSJohn Ogness 1168b6cf8b3fSJohn Ogness if (!desc_reserve(rb, &id)) { 1169b6cf8b3fSJohn Ogness /* Descriptor reservation failures are tracked. */ 1170b6cf8b3fSJohn Ogness atomic_long_inc(&rb->fail); 1171b6cf8b3fSJohn Ogness local_irq_restore(e->irqflags); 1172b6cf8b3fSJohn Ogness goto fail; 1173b6cf8b3fSJohn Ogness } 1174b6cf8b3fSJohn Ogness 1175b6cf8b3fSJohn Ogness d = to_desc(desc_ring, id); 1176b6cf8b3fSJohn Ogness 1177b6cf8b3fSJohn Ogness /* 1178b6cf8b3fSJohn Ogness * Set the @e fields here so that prb_commit() can be used if 1179b6cf8b3fSJohn Ogness * text data allocation fails. 1180b6cf8b3fSJohn Ogness */ 1181b6cf8b3fSJohn Ogness e->rb = rb; 1182b6cf8b3fSJohn Ogness e->id = id; 1183b6cf8b3fSJohn Ogness 1184b6cf8b3fSJohn Ogness /* 1185b6cf8b3fSJohn Ogness * Initialize the sequence number if it has "never been set". 1186b6cf8b3fSJohn Ogness * Otherwise just increment it by a full wrap. 1187b6cf8b3fSJohn Ogness * 1188b6cf8b3fSJohn Ogness * @seq is considered "never been set" if it has a value of 0, 1189b6cf8b3fSJohn Ogness * _except_ for @descs[0], which was specially setup by the ringbuffer 1190b6cf8b3fSJohn Ogness * initializer and therefore is always considered as set. 1191b6cf8b3fSJohn Ogness * 1192b6cf8b3fSJohn Ogness * See the "Bootstrap" comment block in printk_ringbuffer.h for 1193b6cf8b3fSJohn Ogness * details about how the initializer bootstraps the descriptors. 1194b6cf8b3fSJohn Ogness */ 1195b6cf8b3fSJohn Ogness if (d->info.seq == 0 && DESC_INDEX(desc_ring, id) != 0) 1196b6cf8b3fSJohn Ogness d->info.seq = DESC_INDEX(desc_ring, id); 1197b6cf8b3fSJohn Ogness else 1198b6cf8b3fSJohn Ogness d->info.seq += DESCS_COUNT(desc_ring); 1199b6cf8b3fSJohn Ogness 1200b6cf8b3fSJohn Ogness r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size, 1201b6cf8b3fSJohn Ogness &d->text_blk_lpos, id); 1202b6cf8b3fSJohn Ogness /* If text data allocation fails, a data-less record is committed. */ 1203b6cf8b3fSJohn Ogness if (r->text_buf_size && !r->text_buf) { 1204b6cf8b3fSJohn Ogness d->info.text_len = 0; 1205b6cf8b3fSJohn Ogness d->info.dict_len = 0; 1206b6cf8b3fSJohn Ogness prb_commit(e); 1207b6cf8b3fSJohn Ogness /* prb_commit() re-enabled interrupts. 
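 * The failure path below must therefore not call local_irq_restore()
 * again.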
*/ 1208b6cf8b3fSJohn Ogness goto fail; 1209b6cf8b3fSJohn Ogness } 1210b6cf8b3fSJohn Ogness 1211b6cf8b3fSJohn Ogness r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size, 1212b6cf8b3fSJohn Ogness &d->dict_blk_lpos, id); 1213b6cf8b3fSJohn Ogness /* 1214b6cf8b3fSJohn Ogness * If dict data allocation fails, the caller can still commit 1215b6cf8b3fSJohn Ogness * text. But dictionary information will not be available. 1216b6cf8b3fSJohn Ogness */ 1217b6cf8b3fSJohn Ogness if (r->dict_buf_size && !r->dict_buf) 1218b6cf8b3fSJohn Ogness r->dict_buf_size = 0; 1219b6cf8b3fSJohn Ogness 1220b6cf8b3fSJohn Ogness r->info = &d->info; 1221b6cf8b3fSJohn Ogness 1222b6cf8b3fSJohn Ogness /* Set default values for the sizes. */ 1223b6cf8b3fSJohn Ogness d->info.text_len = r->text_buf_size; 1224b6cf8b3fSJohn Ogness d->info.dict_len = r->dict_buf_size; 1225b6cf8b3fSJohn Ogness 1226b6cf8b3fSJohn Ogness /* Record full text space used by record. */ 1227b6cf8b3fSJohn Ogness e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); 1228b6cf8b3fSJohn Ogness 1229b6cf8b3fSJohn Ogness return true; 1230b6cf8b3fSJohn Ogness fail: 1231b6cf8b3fSJohn Ogness /* Make it clear to the caller that the reserve failed. */ 1232b6cf8b3fSJohn Ogness memset(r, 0, sizeof(*r)); 1233b6cf8b3fSJohn Ogness return false; 1234b6cf8b3fSJohn Ogness } 1235b6cf8b3fSJohn Ogness 1236b6cf8b3fSJohn Ogness /** 1237b6cf8b3fSJohn Ogness * prb_commit() - Commit (previously reserved) data to the ringbuffer. 1238b6cf8b3fSJohn Ogness * 1239b6cf8b3fSJohn Ogness * @e: The entry containing the reserved data information. 1240b6cf8b3fSJohn Ogness * 1241b6cf8b3fSJohn Ogness * This is the public function available to writers to commit data. 1242b6cf8b3fSJohn Ogness * 1243b6cf8b3fSJohn Ogness * Context: Any context. Enables local interrupts. 1244b6cf8b3fSJohn Ogness */ 1245b6cf8b3fSJohn Ogness void prb_commit(struct prb_reserved_entry *e) 1246b6cf8b3fSJohn Ogness { 1247b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &e->rb->desc_ring; 1248b6cf8b3fSJohn Ogness struct prb_desc *d = to_desc(desc_ring, e->id); 1249b6cf8b3fSJohn Ogness unsigned long prev_state_val = e->id | 0; 1250b6cf8b3fSJohn Ogness 1251b6cf8b3fSJohn Ogness /* Now the writer has finished all writing: LMM(prb_commit:A) */ 1252b6cf8b3fSJohn Ogness 1253b6cf8b3fSJohn Ogness /* 1254b6cf8b3fSJohn Ogness * Set the descriptor as committed. See "ABA Issues" about why 1255b6cf8b3fSJohn Ogness * cmpxchg() instead of set() is used. 1256b6cf8b3fSJohn Ogness * 1257b6cf8b3fSJohn Ogness * Guarantee all record data is stored before the descriptor state 1258b6cf8b3fSJohn Ogness * is stored as committed. A write memory barrier is sufficient for 1259b6cf8b3fSJohn Ogness * this. This pairs with desc_read:B. 1260b6cf8b3fSJohn Ogness */ 1261b6cf8b3fSJohn Ogness if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, 1262b6cf8b3fSJohn Ogness e->id | DESC_COMMITTED_MASK)) { /* LMM(prb_commit:B) */ 1263b6cf8b3fSJohn Ogness WARN_ON_ONCE(1); 1264b6cf8b3fSJohn Ogness } 1265b6cf8b3fSJohn Ogness 1266b6cf8b3fSJohn Ogness /* Restore interrupts, the reserve/commit window is finished. */ 1267b6cf8b3fSJohn Ogness local_irq_restore(e->irqflags); 1268b6cf8b3fSJohn Ogness } 1269b6cf8b3fSJohn Ogness 1270b6cf8b3fSJohn Ogness /* 1271b6cf8b3fSJohn Ogness * Count the number of lines in provided text. All text has at least 1 line 1272b6cf8b3fSJohn Ogness * (even if @text_size is 0). Each '\n' processed is counted as an additional 1273b6cf8b3fSJohn Ogness * line. 
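 *
 * For example (illustrative values): count_lines("a\nb\n", 4) returns 3,
 * i.e. the initial line plus one additional line per '\n' found.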
1274b6cf8b3fSJohn Ogness */ 1275d397820fSJohn Ogness static unsigned int count_lines(const char *text, unsigned int text_size) 1276b6cf8b3fSJohn Ogness { 1277b6cf8b3fSJohn Ogness unsigned int next_size = text_size; 1278b6cf8b3fSJohn Ogness unsigned int line_count = 1; 1279d397820fSJohn Ogness const char *next = text; 1280b6cf8b3fSJohn Ogness 1281b6cf8b3fSJohn Ogness while (next_size) { 1282b6cf8b3fSJohn Ogness next = memchr(next, '\n', next_size); 1283b6cf8b3fSJohn Ogness if (!next) 1284b6cf8b3fSJohn Ogness break; 1285b6cf8b3fSJohn Ogness line_count++; 1286b6cf8b3fSJohn Ogness next++; 1287b6cf8b3fSJohn Ogness next_size = text_size - (next - text); 1288b6cf8b3fSJohn Ogness } 1289b6cf8b3fSJohn Ogness 1290b6cf8b3fSJohn Ogness return line_count; 1291b6cf8b3fSJohn Ogness } 1292b6cf8b3fSJohn Ogness 1293b6cf8b3fSJohn Ogness /* 1294b6cf8b3fSJohn Ogness * Given @blk_lpos, copy an expected @len of data into the provided buffer. 1295b6cf8b3fSJohn Ogness * If @line_count is provided, count the number of lines in the data. 1296b6cf8b3fSJohn Ogness * 1297b6cf8b3fSJohn Ogness * This function (used by readers) performs strict validation on the data 1298b6cf8b3fSJohn Ogness * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is 1299b6cf8b3fSJohn Ogness * triggered if an internal error is detected. 1300b6cf8b3fSJohn Ogness */ 1301b6cf8b3fSJohn Ogness static bool copy_data(struct prb_data_ring *data_ring, 1302b6cf8b3fSJohn Ogness struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf, 1303b6cf8b3fSJohn Ogness unsigned int buf_size, unsigned int *line_count) 1304b6cf8b3fSJohn Ogness { 1305b6cf8b3fSJohn Ogness unsigned int data_size; 1306d397820fSJohn Ogness const char *data; 1307b6cf8b3fSJohn Ogness 1308b6cf8b3fSJohn Ogness /* Caller might not want any data. */ 1309b6cf8b3fSJohn Ogness if ((!buf || !buf_size) && !line_count) 1310b6cf8b3fSJohn Ogness return true; 1311b6cf8b3fSJohn Ogness 1312b6cf8b3fSJohn Ogness data = get_data(data_ring, blk_lpos, &data_size); 1313b6cf8b3fSJohn Ogness if (!data) 1314b6cf8b3fSJohn Ogness return false; 1315b6cf8b3fSJohn Ogness 1316b6cf8b3fSJohn Ogness /* 1317b6cf8b3fSJohn Ogness * Actual cannot be less than expected. It can be more than expected 1318b6cf8b3fSJohn Ogness * because of the trailing alignment padding. 1319b6cf8b3fSJohn Ogness */ 1320b6cf8b3fSJohn Ogness if (WARN_ON_ONCE(data_size < (unsigned int)len)) { 1321b6cf8b3fSJohn Ogness pr_warn_once("wrong data size (%u, expecting %hu) for data: %.*s\n", 1322b6cf8b3fSJohn Ogness data_size, len, data_size, data); 1323b6cf8b3fSJohn Ogness return false; 1324b6cf8b3fSJohn Ogness } 1325b6cf8b3fSJohn Ogness 1326b6cf8b3fSJohn Ogness /* Caller interested in the line count? */ 1327b6cf8b3fSJohn Ogness if (line_count) 1328b6cf8b3fSJohn Ogness *line_count = count_lines(data, data_size); 1329b6cf8b3fSJohn Ogness 1330b6cf8b3fSJohn Ogness /* Caller interested in the data content? */ 1331b6cf8b3fSJohn Ogness if (!buf || !buf_size) 1332b6cf8b3fSJohn Ogness return true; 1333b6cf8b3fSJohn Ogness 1334b6cf8b3fSJohn Ogness data_size = min_t(u16, buf_size, len); 1335b6cf8b3fSJohn Ogness 1336b6cf8b3fSJohn Ogness memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */ 1337b6cf8b3fSJohn Ogness return true; 1338b6cf8b3fSJohn Ogness } 1339b6cf8b3fSJohn Ogness 1340b6cf8b3fSJohn Ogness /* 1341b6cf8b3fSJohn Ogness * This is an extended version of desc_read(). It gets a copy of a specified 1342b6cf8b3fSJohn Ogness * descriptor. 
However, it also verifies that the record is committed and has 1343b6cf8b3fSJohn Ogness * the sequence number @seq. On success, 0 is returned. 1344b6cf8b3fSJohn Ogness * 1345b6cf8b3fSJohn Ogness * Error return values: 1346b6cf8b3fSJohn Ogness * -EINVAL: A committed record with sequence number @seq does not exist. 1347b6cf8b3fSJohn Ogness * -ENOENT: A committed record with sequence number @seq exists, but its data 1348b6cf8b3fSJohn Ogness * is not available. This is a valid record, so readers should 1349b6cf8b3fSJohn Ogness * continue with the next record. 1350b6cf8b3fSJohn Ogness */ 1351b6cf8b3fSJohn Ogness static int desc_read_committed_seq(struct prb_desc_ring *desc_ring, 1352b6cf8b3fSJohn Ogness unsigned long id, u64 seq, 1353b6cf8b3fSJohn Ogness struct prb_desc *desc_out) 1354b6cf8b3fSJohn Ogness { 1355b6cf8b3fSJohn Ogness struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos; 1356b6cf8b3fSJohn Ogness enum desc_state d_state; 1357b6cf8b3fSJohn Ogness 1358b6cf8b3fSJohn Ogness d_state = desc_read(desc_ring, id, desc_out); 1359b6cf8b3fSJohn Ogness 1360b6cf8b3fSJohn Ogness /* 1361b6cf8b3fSJohn Ogness * An unexpected @id (desc_miss) or @seq mismatch means the record 1362b6cf8b3fSJohn Ogness * does not exist. A descriptor in the reserved state means the 1363b6cf8b3fSJohn Ogness * record does not yet exist for the reader. 1364b6cf8b3fSJohn Ogness */ 1365b6cf8b3fSJohn Ogness if (d_state == desc_miss || 1366b6cf8b3fSJohn Ogness d_state == desc_reserved || 1367b6cf8b3fSJohn Ogness desc_out->info.seq != seq) { 1368b6cf8b3fSJohn Ogness return -EINVAL; 1369b6cf8b3fSJohn Ogness } 1370b6cf8b3fSJohn Ogness 1371b6cf8b3fSJohn Ogness /* 1372b6cf8b3fSJohn Ogness * A descriptor in the reusable state may no longer have its data 1373d397820fSJohn Ogness * available; report it as existing but with lost data. Or the record 1374d397820fSJohn Ogness * may actually be a record with lost data. 1375b6cf8b3fSJohn Ogness */ 1376b6cf8b3fSJohn Ogness if (d_state == desc_reusable || 1377d397820fSJohn Ogness (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) { 1378b6cf8b3fSJohn Ogness return -ENOENT; 1379b6cf8b3fSJohn Ogness } 1380b6cf8b3fSJohn Ogness 1381b6cf8b3fSJohn Ogness return 0; 1382b6cf8b3fSJohn Ogness } 1383b6cf8b3fSJohn Ogness 1384b6cf8b3fSJohn Ogness /* 1385b6cf8b3fSJohn Ogness * Copy the ringbuffer data from the record with @seq to the provided 1386b6cf8b3fSJohn Ogness * @r buffer. On success, 0 is returned. 1387b6cf8b3fSJohn Ogness * 1388b6cf8b3fSJohn Ogness * See desc_read_committed_seq() for error return values. 1389b6cf8b3fSJohn Ogness */ 1390b6cf8b3fSJohn Ogness static int prb_read(struct printk_ringbuffer *rb, u64 seq, 1391b6cf8b3fSJohn Ogness struct printk_record *r, unsigned int *line_count) 1392b6cf8b3fSJohn Ogness { 1393b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1394b6cf8b3fSJohn Ogness struct prb_desc *rdesc = to_desc(desc_ring, seq); 1395b6cf8b3fSJohn Ogness atomic_long_t *state_var = &rdesc->state_var; 1396b6cf8b3fSJohn Ogness struct prb_desc desc; 1397b6cf8b3fSJohn Ogness unsigned long id; 1398b6cf8b3fSJohn Ogness int err; 1399b6cf8b3fSJohn Ogness 1400b6cf8b3fSJohn Ogness /* Extract the ID, used to specify the descriptor to read. */ 1401b6cf8b3fSJohn Ogness id = DESC_ID(atomic_long_read(state_var)); 1402b6cf8b3fSJohn Ogness 1403b6cf8b3fSJohn Ogness /* Get a local copy of the correct descriptor (if available). 
*/ 1404b6cf8b3fSJohn Ogness err = desc_read_committed_seq(desc_ring, id, seq, &desc); 1405b6cf8b3fSJohn Ogness 1406b6cf8b3fSJohn Ogness /* 1407b6cf8b3fSJohn Ogness * If @r is NULL, the caller is only interested in the availability 1408b6cf8b3fSJohn Ogness * of the record. 1409b6cf8b3fSJohn Ogness */ 1410b6cf8b3fSJohn Ogness if (err || !r) 1411b6cf8b3fSJohn Ogness return err; 1412b6cf8b3fSJohn Ogness 1413b6cf8b3fSJohn Ogness /* If requested, copy meta data. */ 1414b6cf8b3fSJohn Ogness if (r->info) 1415b6cf8b3fSJohn Ogness memcpy(r->info, &desc.info, sizeof(*(r->info))); 1416b6cf8b3fSJohn Ogness 1417b6cf8b3fSJohn Ogness /* Copy text data. If it fails, this is a data-less record. */ 1418b6cf8b3fSJohn Ogness if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, desc.info.text_len, 1419b6cf8b3fSJohn Ogness r->text_buf, r->text_buf_size, line_count)) { 1420b6cf8b3fSJohn Ogness return -ENOENT; 1421b6cf8b3fSJohn Ogness } 1422b6cf8b3fSJohn Ogness 1423b6cf8b3fSJohn Ogness /* 1424b6cf8b3fSJohn Ogness * Copy dict data. Although this should not fail, dict data is not 1425b6cf8b3fSJohn Ogness * important. So if it fails, modify the copied meta data to report 1426b6cf8b3fSJohn Ogness * that there is no dict data, thus silently dropping the dict data. 1427b6cf8b3fSJohn Ogness */ 1428b6cf8b3fSJohn Ogness if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, desc.info.dict_len, 1429b6cf8b3fSJohn Ogness r->dict_buf, r->dict_buf_size, NULL)) { 1430b6cf8b3fSJohn Ogness if (r->info) 1431b6cf8b3fSJohn Ogness r->info->dict_len = 0; 1432b6cf8b3fSJohn Ogness } 1433b6cf8b3fSJohn Ogness 1434b6cf8b3fSJohn Ogness /* Ensure the record is still committed and has the same @seq. */ 1435b6cf8b3fSJohn Ogness return desc_read_committed_seq(desc_ring, id, seq, &desc); 1436b6cf8b3fSJohn Ogness } 1437b6cf8b3fSJohn Ogness 1438b6cf8b3fSJohn Ogness /* Get the sequence number of the tail descriptor. */ 1439b6cf8b3fSJohn Ogness static u64 prb_first_seq(struct printk_ringbuffer *rb) 1440b6cf8b3fSJohn Ogness { 1441b6cf8b3fSJohn Ogness struct prb_desc_ring *desc_ring = &rb->desc_ring; 1442b6cf8b3fSJohn Ogness enum desc_state d_state; 1443b6cf8b3fSJohn Ogness struct prb_desc desc; 1444b6cf8b3fSJohn Ogness unsigned long id; 1445b6cf8b3fSJohn Ogness 1446b6cf8b3fSJohn Ogness for (;;) { 1447b6cf8b3fSJohn Ogness id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */ 1448b6cf8b3fSJohn Ogness 1449b6cf8b3fSJohn Ogness d_state = desc_read(desc_ring, id, &desc); /* LMM(prb_first_seq:B) */ 1450b6cf8b3fSJohn Ogness 1451b6cf8b3fSJohn Ogness /* 1452b6cf8b3fSJohn Ogness * This loop will not be infinite because the tail is 1453b6cf8b3fSJohn Ogness * _always_ in the committed or reusable state. 1454b6cf8b3fSJohn Ogness */ 1455b6cf8b3fSJohn Ogness if (d_state == desc_committed || d_state == desc_reusable) 1456b6cf8b3fSJohn Ogness break; 1457b6cf8b3fSJohn Ogness 1458b6cf8b3fSJohn Ogness /* 1459b6cf8b3fSJohn Ogness * Guarantee the last state load from desc_read() is before 1460b6cf8b3fSJohn Ogness * reloading @tail_id in order to see a new tail in the case 1461b6cf8b3fSJohn Ogness * that the descriptor has been recycled. This pairs with 1462b6cf8b3fSJohn Ogness * desc_reserve:D. 1463b6cf8b3fSJohn Ogness * 1464b6cf8b3fSJohn Ogness * Memory barrier involvement: 1465b6cf8b3fSJohn Ogness * 1466b6cf8b3fSJohn Ogness * If prb_first_seq:B reads from desc_reserve:F, then 1467b6cf8b3fSJohn Ogness * prb_first_seq:A reads from desc_push_tail:B. 
1468b6cf8b3fSJohn Ogness * 1469b6cf8b3fSJohn Ogness * Relies on: 1470b6cf8b3fSJohn Ogness * 1471b6cf8b3fSJohn Ogness * MB from desc_push_tail:B to desc_reserve:F 1472b6cf8b3fSJohn Ogness * matching 1473b6cf8b3fSJohn Ogness * RMB prb_first_seq:B to prb_first_seq:A 1474b6cf8b3fSJohn Ogness */ 1475b6cf8b3fSJohn Ogness smp_rmb(); /* LMM(prb_first_seq:C) */ 1476b6cf8b3fSJohn Ogness } 1477b6cf8b3fSJohn Ogness 1478b6cf8b3fSJohn Ogness return desc.info.seq; 1479b6cf8b3fSJohn Ogness } 1480b6cf8b3fSJohn Ogness 1481b6cf8b3fSJohn Ogness /* 1482b6cf8b3fSJohn Ogness * Non-blocking read of a record. Updates @seq to the last committed record 1483b6cf8b3fSJohn Ogness * (which may have no data). 1484b6cf8b3fSJohn Ogness * 1485b6cf8b3fSJohn Ogness * See the description of prb_read_valid() and prb_read_valid_info() 1486b6cf8b3fSJohn Ogness * for details. 1487b6cf8b3fSJohn Ogness */ 1488b6cf8b3fSJohn Ogness static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, 1489b6cf8b3fSJohn Ogness struct printk_record *r, unsigned int *line_count) 1490b6cf8b3fSJohn Ogness { 1491b6cf8b3fSJohn Ogness u64 tail_seq; 1492b6cf8b3fSJohn Ogness int err; 1493b6cf8b3fSJohn Ogness 1494b6cf8b3fSJohn Ogness while ((err = prb_read(rb, *seq, r, line_count))) { 1495b6cf8b3fSJohn Ogness tail_seq = prb_first_seq(rb); 1496b6cf8b3fSJohn Ogness 1497b6cf8b3fSJohn Ogness if (*seq < tail_seq) { 1498b6cf8b3fSJohn Ogness /* 1499b6cf8b3fSJohn Ogness * Behind the tail. Catch up and try again. This 1500b6cf8b3fSJohn Ogness * can happen for -ENOENT and -EINVAL cases. 1501b6cf8b3fSJohn Ogness */ 1502b6cf8b3fSJohn Ogness *seq = tail_seq; 1503b6cf8b3fSJohn Ogness 1504b6cf8b3fSJohn Ogness } else if (err == -ENOENT) { 1505b6cf8b3fSJohn Ogness /* Record exists, but no data available. Skip. */ 1506b6cf8b3fSJohn Ogness (*seq)++; 1507b6cf8b3fSJohn Ogness 1508b6cf8b3fSJohn Ogness } else { 1509b6cf8b3fSJohn Ogness /* Non-existent/non-committed record. Must stop. */ 1510b6cf8b3fSJohn Ogness return false; 1511b6cf8b3fSJohn Ogness } 1512b6cf8b3fSJohn Ogness } 1513b6cf8b3fSJohn Ogness 1514b6cf8b3fSJohn Ogness return true; 1515b6cf8b3fSJohn Ogness } 1516b6cf8b3fSJohn Ogness 1517b6cf8b3fSJohn Ogness /** 1518b6cf8b3fSJohn Ogness * prb_read_valid() - Non-blocking read of a requested record or (if gone) 1519b6cf8b3fSJohn Ogness * the next available record. 1520b6cf8b3fSJohn Ogness * 1521b6cf8b3fSJohn Ogness * @rb: The ringbuffer to read from. 1522b6cf8b3fSJohn Ogness * @seq: The sequence number of the record to read. 1523b6cf8b3fSJohn Ogness * @r: A record data buffer to store the read record to. 1524b6cf8b3fSJohn Ogness * 1525b6cf8b3fSJohn Ogness * This is the public function available to readers to read a record. 1526b6cf8b3fSJohn Ogness * 1527b6cf8b3fSJohn Ogness * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be 1528b6cf8b3fSJohn Ogness * filled in. Any of the buffer pointers can be set to NULL if the reader 1529b6cf8b3fSJohn Ogness * is not interested in that data. To ensure proper initialization of @r, 1530b6cf8b3fSJohn Ogness * prb_rec_init_rd() should be used. 1531b6cf8b3fSJohn Ogness * 1532b6cf8b3fSJohn Ogness * Context: Any context. 1533b6cf8b3fSJohn Ogness * Return: true if a record was read, otherwise false. 1534b6cf8b3fSJohn Ogness * 1535b6cf8b3fSJohn Ogness * On success, the reader must check r->info.seq to see which record was 1536b6cf8b3fSJohn Ogness * actually read. This allows the reader to detect dropped records. 
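 *
 * A minimal reader sketch (illustrative only; @test_rb and the local
 * buffer are assumptions, not part of this file):
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[200];
 *	u64 seq = 0;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), NULL, 0);
 *
 *	// test_rb: a ringbuffer assumed to be defined elsewhere
 *	while (prb_read_valid(&test_rb, seq, &r)) {
 *		if (seq != info.seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *		seq = info.seq + 1;
 *
 *		// process info and text_buf here
 *	}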
1537b6cf8b3fSJohn Ogness * 1538b6cf8b3fSJohn Ogness * Failure means @seq refers to a not yet written record. 1539b6cf8b3fSJohn Ogness */ 1540b6cf8b3fSJohn Ogness bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, 1541b6cf8b3fSJohn Ogness struct printk_record *r) 1542b6cf8b3fSJohn Ogness { 1543b6cf8b3fSJohn Ogness return _prb_read_valid(rb, &seq, r, NULL); 1544b6cf8b3fSJohn Ogness } 1545b6cf8b3fSJohn Ogness 1546b6cf8b3fSJohn Ogness /** 1547b6cf8b3fSJohn Ogness * prb_read_valid_info() - Non-blocking read of meta data for a requested 1548b6cf8b3fSJohn Ogness * record or (if gone) the next available record. 1549b6cf8b3fSJohn Ogness * 1550b6cf8b3fSJohn Ogness * @rb: The ringbuffer to read from. 1551b6cf8b3fSJohn Ogness * @seq: The sequence number of the record to read. 1552b6cf8b3fSJohn Ogness * @info: A buffer to store the read record meta data to. 1553b6cf8b3fSJohn Ogness * @line_count: A buffer to store the number of lines in the record text. 1554b6cf8b3fSJohn Ogness * 1555b6cf8b3fSJohn Ogness * This is the public function available to readers to read only the 1556b6cf8b3fSJohn Ogness * meta data of a record. 1557b6cf8b3fSJohn Ogness * 1558b6cf8b3fSJohn Ogness * The reader provides the @info, @line_count buffers to be filled in. 1559b6cf8b3fSJohn Ogness * Either of the buffer pointers can be set to NULL if the reader is not 1560b6cf8b3fSJohn Ogness * interested in that data. 1561b6cf8b3fSJohn Ogness * 1562b6cf8b3fSJohn Ogness * Context: Any context. 1563b6cf8b3fSJohn Ogness * Return: true if a record's meta data was read, otherwise false. 1564b6cf8b3fSJohn Ogness * 1565b6cf8b3fSJohn Ogness * On success, the reader must check info->seq to see which record meta data 1566b6cf8b3fSJohn Ogness * was actually read. This allows the reader to detect dropped records. 1567b6cf8b3fSJohn Ogness * 1568b6cf8b3fSJohn Ogness * Failure means @seq refers to a not yet written record. 1569b6cf8b3fSJohn Ogness */ 1570b6cf8b3fSJohn Ogness bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq, 1571b6cf8b3fSJohn Ogness struct printk_info *info, unsigned int *line_count) 1572b6cf8b3fSJohn Ogness { 1573b6cf8b3fSJohn Ogness struct printk_record r; 1574b6cf8b3fSJohn Ogness 1575b6cf8b3fSJohn Ogness prb_rec_init_rd(&r, info, NULL, 0, NULL, 0); 1576b6cf8b3fSJohn Ogness 1577b6cf8b3fSJohn Ogness return _prb_read_valid(rb, &seq, &r, line_count); 1578b6cf8b3fSJohn Ogness } 1579b6cf8b3fSJohn Ogness 1580b6cf8b3fSJohn Ogness /** 1581b6cf8b3fSJohn Ogness * prb_first_valid_seq() - Get the sequence number of the oldest available 1582b6cf8b3fSJohn Ogness * record. 1583b6cf8b3fSJohn Ogness * 1584b6cf8b3fSJohn Ogness * @rb: The ringbuffer to get the sequence number from. 1585b6cf8b3fSJohn Ogness * 1586b6cf8b3fSJohn Ogness * This is the public function available to readers to see what the 1587b6cf8b3fSJohn Ogness * first/oldest valid sequence number is. 1588b6cf8b3fSJohn Ogness * 1589b6cf8b3fSJohn Ogness * This provides readers a starting point to begin iterating the ringbuffer. 1590b6cf8b3fSJohn Ogness * 1591b6cf8b3fSJohn Ogness * Context: Any context. 1592b6cf8b3fSJohn Ogness * Return: The sequence number of the first/oldest record or, if the 1593b6cf8b3fSJohn Ogness * ringbuffer is empty, 0 is returned. 
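 *
 * For example (illustrative; @test_rb is assumed), a new reader could
 * position itself at the oldest available record before entering its
 * read loop:
 *
 *	u64 seq = prb_first_valid_seq(&test_rb);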
1594b6cf8b3fSJohn Ogness */ 1595b6cf8b3fSJohn Ogness u64 prb_first_valid_seq(struct printk_ringbuffer *rb) 1596b6cf8b3fSJohn Ogness { 1597b6cf8b3fSJohn Ogness u64 seq = 0; 1598b6cf8b3fSJohn Ogness 1599b6cf8b3fSJohn Ogness if (!_prb_read_valid(rb, &seq, NULL, NULL)) 1600b6cf8b3fSJohn Ogness return 0; 1601b6cf8b3fSJohn Ogness 1602b6cf8b3fSJohn Ogness return seq; 1603b6cf8b3fSJohn Ogness } 1604b6cf8b3fSJohn Ogness 1605b6cf8b3fSJohn Ogness /** 1606b6cf8b3fSJohn Ogness * prb_next_seq() - Get the sequence number after the last available record. 1607b6cf8b3fSJohn Ogness * 1608b6cf8b3fSJohn Ogness * @rb: The ringbuffer to get the sequence number from. 1609b6cf8b3fSJohn Ogness * 1610b6cf8b3fSJohn Ogness * This is the public function available to readers to see what the next 1611b6cf8b3fSJohn Ogness * newest sequence number available to readers will be. 1612b6cf8b3fSJohn Ogness * 1613b6cf8b3fSJohn Ogness * This provides readers a sequence number to jump to if all currently 1614b6cf8b3fSJohn Ogness * available records should be skipped. 1615b6cf8b3fSJohn Ogness * 1616b6cf8b3fSJohn Ogness * Context: Any context. 1617b6cf8b3fSJohn Ogness * Return: The sequence number of the next newest (not yet available) record 1618b6cf8b3fSJohn Ogness * for readers. 1619b6cf8b3fSJohn Ogness */ 1620b6cf8b3fSJohn Ogness u64 prb_next_seq(struct printk_ringbuffer *rb) 1621b6cf8b3fSJohn Ogness { 1622b6cf8b3fSJohn Ogness u64 seq = 0; 1623b6cf8b3fSJohn Ogness 1624b6cf8b3fSJohn Ogness /* Search forward from the oldest descriptor. */ 1625b6cf8b3fSJohn Ogness while (_prb_read_valid(rb, &seq, NULL, NULL)) 1626b6cf8b3fSJohn Ogness seq++; 1627b6cf8b3fSJohn Ogness 1628b6cf8b3fSJohn Ogness return seq; 1629b6cf8b3fSJohn Ogness } 1630b6cf8b3fSJohn Ogness 1631b6cf8b3fSJohn Ogness /** 1632b6cf8b3fSJohn Ogness * prb_init() - Initialize a ringbuffer to use provided external buffers. 1633b6cf8b3fSJohn Ogness * 1634b6cf8b3fSJohn Ogness * @rb: The ringbuffer to initialize. 1635b6cf8b3fSJohn Ogness * @text_buf: The data buffer for text data. 1636b6cf8b3fSJohn Ogness * @textbits: The size of @text_buf as a power-of-2 value. 1637b6cf8b3fSJohn Ogness * @dict_buf: The data buffer for dictionary data. 1638b6cf8b3fSJohn Ogness * @dictbits: The size of @dict_buf as a power-of-2 value. 1639b6cf8b3fSJohn Ogness * @descs: The descriptor buffer for ringbuffer records. 1640b6cf8b3fSJohn Ogness * @descbits: The count of @descs items as a power-of-2 value. 1641b6cf8b3fSJohn Ogness * 1642b6cf8b3fSJohn Ogness * This is the public function available to writers to setup a ringbuffer 1643b6cf8b3fSJohn Ogness * during runtime using provided buffers. 1644b6cf8b3fSJohn Ogness * 1645b6cf8b3fSJohn Ogness * This must match the initialization of DEFINE_PRINTKRB(). 1646b6cf8b3fSJohn Ogness * 1647b6cf8b3fSJohn Ogness * Context: Any context. 
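 *
 * A setup sketch (illustrative only; the buffer names and sizes below are
 * assumptions, not part of this file):
 *
 *	static char text_buf[1 << 12] __aligned(__alignof__(unsigned long));
 *	static char dict_buf[1 << 11] __aligned(__alignof__(unsigned long));
 *	static struct prb_desc descs[1 << 5];
 *	static struct printk_ringbuffer rb;
 *
 *	prb_init(&rb, &text_buf[0], 12, &dict_buf[0], 11, &descs[0], 5);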
1648b6cf8b3fSJohn Ogness */ 1649b6cf8b3fSJohn Ogness void prb_init(struct printk_ringbuffer *rb, 1650b6cf8b3fSJohn Ogness char *text_buf, unsigned int textbits, 1651b6cf8b3fSJohn Ogness char *dict_buf, unsigned int dictbits, 1652b6cf8b3fSJohn Ogness struct prb_desc *descs, unsigned int descbits) 1653b6cf8b3fSJohn Ogness { 1654b6cf8b3fSJohn Ogness memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0])); 1655b6cf8b3fSJohn Ogness 1656b6cf8b3fSJohn Ogness rb->desc_ring.count_bits = descbits; 1657b6cf8b3fSJohn Ogness rb->desc_ring.descs = descs; 1658b6cf8b3fSJohn Ogness atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); 1659b6cf8b3fSJohn Ogness atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); 1660b6cf8b3fSJohn Ogness 1661b6cf8b3fSJohn Ogness rb->text_data_ring.size_bits = textbits; 1662b6cf8b3fSJohn Ogness rb->text_data_ring.data = text_buf; 1663b6cf8b3fSJohn Ogness atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits)); 1664b6cf8b3fSJohn Ogness atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits)); 1665b6cf8b3fSJohn Ogness 1666b6cf8b3fSJohn Ogness rb->dict_data_ring.size_bits = dictbits; 1667b6cf8b3fSJohn Ogness rb->dict_data_ring.data = dict_buf; 1668b6cf8b3fSJohn Ogness atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits)); 1669b6cf8b3fSJohn Ogness atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits)); 1670b6cf8b3fSJohn Ogness 1671b6cf8b3fSJohn Ogness atomic_long_set(&rb->fail, 0); 1672b6cf8b3fSJohn Ogness 1673b6cf8b3fSJohn Ogness descs[0].info.seq = -(u64)_DESCS_COUNT(descbits); 1674b6cf8b3fSJohn Ogness 1675b6cf8b3fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].info.seq = 0; 1676b6cf8b3fSJohn Ogness atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits)); 1677d397820fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS; 1678d397820fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS; 1679d397820fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS; 1680d397820fSJohn Ogness descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS; 1681b6cf8b3fSJohn Ogness } 1682b6cf8b3fSJohn Ogness 1683b6cf8b3fSJohn Ogness /** 1684b6cf8b3fSJohn Ogness * prb_record_text_space() - Query the full actual used ringbuffer space for 1685b6cf8b3fSJohn Ogness * the text data of a reserved entry. 1686b6cf8b3fSJohn Ogness * 1687b6cf8b3fSJohn Ogness * @e: The successfully reserved entry to query. 1688b6cf8b3fSJohn Ogness * 1689b6cf8b3fSJohn Ogness * This is the public function available to writers to see how much actual 1690b6cf8b3fSJohn Ogness * space is used in the ringbuffer to store the text data of the specified 1691b6cf8b3fSJohn Ogness * entry. 1692b6cf8b3fSJohn Ogness * 1693b6cf8b3fSJohn Ogness * This function is only valid if @e has been successfully reserved using 1694b6cf8b3fSJohn Ogness * prb_reserve(). 1695b6cf8b3fSJohn Ogness * 1696b6cf8b3fSJohn Ogness * Context: Any context. 1697b6cf8b3fSJohn Ogness * Return: The size in bytes used by the text data of the associated record. 1698b6cf8b3fSJohn Ogness */ 1699b6cf8b3fSJohn Ogness unsigned int prb_record_text_space(struct prb_reserved_entry *e) 1700b6cf8b3fSJohn Ogness { 1701b6cf8b3fSJohn Ogness return e->text_space; 1702b6cf8b3fSJohn Ogness } 1703
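/*
 * Illustrative usage sketch for prb_record_text_space() (not part of this
 * file; @e, @rb, @r and @text_bytes_used are assumptions): after a
 * successful reserve/commit pair, a writer could account for the actual
 * ringbuffer space consumed by its text data:
 *
 *	if (prb_reserve(&e, &rb, &r)) {
 *		// fill in r.text_buf and r.info here
 *		prb_commit(&e);
 *		text_bytes_used += prb_record_text_space(&e);
 *	}
 */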