1 /* 2 * RCU segmented callback lists, function definitions 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright IBM Corporation, 2017 19 * 20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 21 */ 22 23 #include <linux/types.h> 24 #include <linux/kernel.h> 25 #include <linux/interrupt.h> 26 27 #include "rcu_segcblist.h" 28 29 /* Initialize simple callback list. */ 30 void rcu_cblist_init(struct rcu_cblist *rclp) 31 { 32 rclp->head = NULL; 33 rclp->tail = &rclp->head; 34 rclp->len = 0; 35 rclp->len_lazy = 0; 36 } 37 38 /* 39 * Dequeue the oldest rcu_head structure from the specified callback 40 * list. This function assumes that the callback is non-lazy, but 41 * the caller can later invoke rcu_cblist_dequeued_lazy() if it 42 * finds otherwise (and if it cares about laziness). This allows 43 * different users to have different ways of determining laziness. 44 */ 45 struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp) 46 { 47 struct rcu_head *rhp; 48 49 rhp = rclp->head; 50 if (!rhp) 51 return NULL; 52 rclp->len--; 53 rclp->head = rhp->next; 54 if (!rclp->head) 55 rclp->tail = &rclp->head; 56 return rhp; 57 } 58 59 /* 60 * Initialize an rcu_segcblist structure. 61 */ 62 void rcu_segcblist_init(struct rcu_segcblist *rsclp) 63 { 64 int i; 65 66 BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); 67 BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); 68 rsclp->head = NULL; 69 for (i = 0; i < RCU_CBLIST_NSEGS; i++) 70 rsclp->tails[i] = &rsclp->head; 71 rsclp->len = 0; 72 rsclp->len_lazy = 0; 73 } 74 75 /* 76 * Disable the specified rcu_segcblist structure, so that callbacks can 77 * no longer be posted to it. This structure must be empty. 78 */ 79 void rcu_segcblist_disable(struct rcu_segcblist *rsclp) 80 { 81 WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); 82 WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); 83 WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp)); 84 rsclp->tails[RCU_NEXT_TAIL] = NULL; 85 } 86 87 /* 88 * Does the specified rcu_segcblist structure contain callbacks that 89 * are ready to be invoked? 90 */ 91 bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp) 92 { 93 return rcu_segcblist_is_enabled(rsclp) && 94 &rsclp->head != rsclp->tails[RCU_DONE_TAIL]; 95 } 96 97 /* 98 * Does the specified rcu_segcblist structure contain callbacks that 99 * are still pending, that is, not yet ready to be invoked? 100 */ 101 bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) 102 { 103 return rcu_segcblist_is_enabled(rsclp) && 104 !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL); 105 } 106 107 /* 108 * Return a pointer to the first callback in the specified rcu_segcblist 109 * structure. This is useful for diagnostics. 110 */ 111 struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp) 112 { 113 if (rcu_segcblist_is_enabled(rsclp)) 114 return rsclp->head; 115 return NULL; 116 } 117 118 /* 119 * Return a pointer to the first pending callback in the specified 120 * rcu_segcblist structure. This is useful just after posting a given 121 * callback -- if that callback is the first pending callback, then 122 * you cannot rely on someone else having already started up the required 123 * grace period. 124 */ 125 struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) 126 { 127 if (rcu_segcblist_is_enabled(rsclp)) 128 return *rsclp->tails[RCU_DONE_TAIL]; 129 return NULL; 130 } 131 132 /* 133 * Enqueue the specified callback onto the specified rcu_segcblist 134 * structure, updating accounting as needed. Note that the ->len 135 * field may be accessed locklessly, hence the WRITE_ONCE(). 136 * The ->len field is used by rcu_barrier() and friends to determine 137 * if it must post a callback on this structure, and it is OK 138 * for rcu_barrier() to sometimes post callbacks needlessly, but 139 * absolutely not OK for it to ever miss posting a callback. 140 */ 141 void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, 142 struct rcu_head *rhp, bool lazy) 143 { 144 WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */ 145 if (lazy) 146 rsclp->len_lazy++; 147 smp_mb(); /* Ensure counts are updated before callback is enqueued. */ 148 rhp->next = NULL; 149 *rsclp->tails[RCU_NEXT_TAIL] = rhp; 150 rsclp->tails[RCU_NEXT_TAIL] = &rhp->next; 151 } 152 153 /* 154 * Entrain the specified callback onto the specified rcu_segcblist at 155 * the end of the last non-empty segment. If the entire rcu_segcblist 156 * is empty, make no change, but return false. 157 * 158 * This is intended for use by rcu_barrier()-like primitives, -not- 159 * for normal grace-period use. IMPORTANT: The callback you enqueue 160 * will wait for all prior callbacks, NOT necessarily for a grace 161 * period. You have been warned. 162 */ 163 bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, 164 struct rcu_head *rhp, bool lazy) 165 { 166 int i; 167 168 if (rcu_segcblist_n_cbs(rsclp) == 0) 169 return false; 170 WRITE_ONCE(rsclp->len, rsclp->len + 1); 171 if (lazy) 172 rsclp->len_lazy++; 173 smp_mb(); /* Ensure counts are updated before callback is entrained. */ 174 rhp->next = NULL; 175 for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) 176 if (rsclp->tails[i] != rsclp->tails[i - 1]) 177 break; 178 *rsclp->tails[i] = rhp; 179 for (; i <= RCU_NEXT_TAIL; i++) 180 rsclp->tails[i] = &rhp->next; 181 return true; 182 } 183 184 /* 185 * Extract only the counts from the specified rcu_segcblist structure, 186 * and place them in the specified rcu_cblist structure. This function 187 * supports both callback orphaning and invocation, hence the separation 188 * of counts and callbacks. (Callbacks ready for invocation must be 189 * orphaned and adopted separately from pending callbacks, but counts 190 * apply to all callbacks. Locking must be used to make sure that 191 * both orphaned-callbacks lists are consistent.) 192 */ 193 void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, 194 struct rcu_cblist *rclp) 195 { 196 rclp->len_lazy += rsclp->len_lazy; 197 rclp->len += rsclp->len; 198 rsclp->len_lazy = 0; 199 WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */ 200 } 201 202 /* 203 * Extract only those callbacks ready to be invoked from the specified 204 * rcu_segcblist structure and place them in the specified rcu_cblist 205 * structure. 206 */ 207 void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, 208 struct rcu_cblist *rclp) 209 { 210 int i; 211 212 if (!rcu_segcblist_ready_cbs(rsclp)) 213 return; /* Nothing to do. */ 214 *rclp->tail = rsclp->head; 215 rsclp->head = *rsclp->tails[RCU_DONE_TAIL]; 216 *rsclp->tails[RCU_DONE_TAIL] = NULL; 217 rclp->tail = rsclp->tails[RCU_DONE_TAIL]; 218 for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) 219 if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) 220 rsclp->tails[i] = &rsclp->head; 221 } 222 223 /* 224 * Extract only those callbacks still pending (not yet ready to be 225 * invoked) from the specified rcu_segcblist structure and place them in 226 * the specified rcu_cblist structure. Note that this loses information 227 * about any callbacks that might have been partway done waiting for 228 * their grace period. Too bad! They will have to start over. 229 */ 230 void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, 231 struct rcu_cblist *rclp) 232 { 233 int i; 234 235 if (!rcu_segcblist_pend_cbs(rsclp)) 236 return; /* Nothing to do. */ 237 *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; 238 rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; 239 *rsclp->tails[RCU_DONE_TAIL] = NULL; 240 for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) 241 rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL]; 242 } 243 244 /* 245 * Insert counts from the specified rcu_cblist structure in the 246 * specified rcu_segcblist structure. 247 */ 248 void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, 249 struct rcu_cblist *rclp) 250 { 251 rsclp->len_lazy += rclp->len_lazy; 252 /* ->len sampled locklessly. */ 253 WRITE_ONCE(rsclp->len, rsclp->len + rclp->len); 254 rclp->len_lazy = 0; 255 rclp->len = 0; 256 } 257 258 /* 259 * Move callbacks from the specified rcu_cblist to the beginning of the 260 * done-callbacks segment of the specified rcu_segcblist. 261 */ 262 void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, 263 struct rcu_cblist *rclp) 264 { 265 int i; 266 267 if (!rclp->head) 268 return; /* No callbacks to move. */ 269 *rclp->tail = rsclp->head; 270 rsclp->head = rclp->head; 271 for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) 272 if (&rsclp->head == rsclp->tails[i]) 273 rsclp->tails[i] = rclp->tail; 274 else 275 break; 276 rclp->head = NULL; 277 rclp->tail = &rclp->head; 278 } 279 280 /* 281 * Move callbacks from the specified rcu_cblist to the end of the 282 * new-callbacks segment of the specified rcu_segcblist. 283 */ 284 void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, 285 struct rcu_cblist *rclp) 286 { 287 if (!rclp->head) 288 return; /* Nothing to do. */ 289 *rsclp->tails[RCU_NEXT_TAIL] = rclp->head; 290 rsclp->tails[RCU_NEXT_TAIL] = rclp->tail; 291 rclp->head = NULL; 292 rclp->tail = &rclp->head; 293 } 294 295 /* 296 * Advance the callbacks in the specified rcu_segcblist structure based 297 * on the current value passed in for the grace-period counter. 298 */ 299 void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq) 300 { 301 int i, j; 302 303 WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); 304 if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) 305 return; 306 307 /* 308 * Find all callbacks whose ->gp_seq numbers indicate that they 309 * are ready to invoke, and put them into the RCU_DONE_TAIL segment. 310 */ 311 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { 312 if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) 313 break; 314 rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i]; 315 } 316 317 /* If no callbacks moved, nothing more need be done. */ 318 if (i == RCU_WAIT_TAIL) 319 return; 320 321 /* Clean up tail pointers that might have been misordered above. */ 322 for (j = RCU_WAIT_TAIL; j < i; j++) 323 rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL]; 324 325 /* 326 * Callbacks moved, so clean up the misordered ->tails[] pointers 327 * that now point into the middle of the list of ready-to-invoke 328 * callbacks. The overall effect is to copy down the later pointers 329 * into the gap that was created by the now-ready segments. 330 */ 331 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { 332 if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) 333 break; /* No more callbacks. */ 334 rsclp->tails[j] = rsclp->tails[i]; 335 rsclp->gp_seq[j] = rsclp->gp_seq[i]; 336 } 337 } 338 339 /* 340 * "Accelerate" callbacks based on more-accurate grace-period information. 341 * The reason for this is that RCU does not synchronize the beginnings and 342 * ends of grace periods, and that callbacks are posted locally. This in 343 * turn means that the callbacks must be labelled conservatively early 344 * on, as getting exact information would degrade both performance and 345 * scalability. When more accurate grace-period information becomes 346 * available, previously posted callbacks can be "accelerated", marking 347 * them to complete at the end of the earlier grace period. 348 * 349 * This function operates on an rcu_segcblist structure, and also the 350 * grace-period sequence number seq at which new callbacks would become 351 * ready to invoke. Returns true if there are callbacks that won't be 352 * ready to invoke until seq, false otherwise. 353 */ 354 bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq) 355 { 356 int i; 357 358 WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); 359 if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) 360 return false; 361 362 /* 363 * Find the segment preceding the oldest segment of callbacks 364 * whose ->gp_seq[] completion is at or after that passed in via 365 * "seq", skipping any empty segments. This oldest segment, along 366 * with any later segments, can be merged in with any newly arrived 367 * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq" 368 * as their ->gp_seq[] grace-period completion sequence number. 369 */ 370 for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--) 371 if (rsclp->tails[i] != rsclp->tails[i - 1] && 372 ULONG_CMP_LT(rsclp->gp_seq[i], seq)) 373 break; 374 375 /* 376 * If all the segments contain callbacks that correspond to 377 * earlier grace-period sequence numbers than "seq", leave. 378 * Assuming that the rcu_segcblist structure has enough 379 * segments in its arrays, this can only happen if some of 380 * the non-done segments contain callbacks that really are 381 * ready to invoke. This situation will get straightened 382 * out by the next call to rcu_segcblist_advance(). 383 * 384 * Also advance to the oldest segment of callbacks whose 385 * ->gp_seq[] completion is at or after that passed in via "seq", 386 * skipping any empty segments. 387 */ 388 if (++i >= RCU_NEXT_TAIL) 389 return false; 390 391 /* 392 * Merge all later callbacks, including newly arrived callbacks, 393 * into the segment located by the for-loop above. Assign "seq" 394 * as the ->gp_seq[] value in order to correctly handle the case 395 * where there were no pending callbacks in the rcu_segcblist 396 * structure other than in the RCU_NEXT_TAIL segment. 397 */ 398 for (; i < RCU_NEXT_TAIL; i++) { 399 rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL]; 400 rsclp->gp_seq[i] = seq; 401 } 402 return true; 403 } 404 405 /* 406 * Scan the specified rcu_segcblist structure for callbacks that need 407 * a grace period later than the one specified by "seq". We don't look 408 * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't 409 * have a grace-period sequence number. 410 */ 411 bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp, 412 unsigned long seq) 413 { 414 int i; 415 416 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) 417 if (rsclp->tails[i - 1] != rsclp->tails[i] && 418 ULONG_CMP_LT(seq, rsclp->gp_seq[i])) 419 return true; 420 return false; 421 } 422 423 /* 424 * Merge the source rcu_segcblist structure into the destination 425 * rcu_segcblist structure, then initialize the source. Any pending 426 * callbacks from the source get to start over. It is best to 427 * advance and accelerate both the destination and the source 428 * before merging. 429 */ 430 void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp, 431 struct rcu_segcblist *src_rsclp) 432 { 433 struct rcu_cblist donecbs; 434 struct rcu_cblist pendcbs; 435 436 rcu_cblist_init(&donecbs); 437 rcu_cblist_init(&pendcbs); 438 rcu_segcblist_extract_count(src_rsclp, &donecbs); 439 rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs); 440 rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs); 441 rcu_segcblist_insert_count(dst_rsclp, &donecbs); 442 rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs); 443 rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs); 444 rcu_segcblist_init(src_rsclp); 445 } 446