1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * RCU segmented callback lists, function definitions 4 * 5 * Copyright IBM Corporation, 2017 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 */ 9 10 #include <linux/cpu.h> 11 #include <linux/interrupt.h> 12 #include <linux/kernel.h> 13 #include <linux/types.h> 14 15 #include "rcu_segcblist.h" 16 17 /* Initialize simple callback list. */ 18 void rcu_cblist_init(struct rcu_cblist *rclp) 19 { 20 rclp->head = NULL; 21 rclp->tail = &rclp->head; 22 rclp->len = 0; 23 } 24 25 /* 26 * Enqueue an rcu_head structure onto the specified callback list. 27 */ 28 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp) 29 { 30 *rclp->tail = rhp; 31 rclp->tail = &rhp->next; 32 WRITE_ONCE(rclp->len, rclp->len + 1); 33 } 34 35 /* 36 * Flush the second rcu_cblist structure onto the first one, obliterating 37 * any contents of the first. If rhp is non-NULL, enqueue it as the sole 38 * element of the second rcu_cblist structure, but ensuring that the second 39 * rcu_cblist structure, if initially non-empty, always appears non-empty 40 * throughout the process. If rdp is NULL, the second rcu_cblist structure 41 * is instead initialized to empty. 42 */ 43 void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp, 44 struct rcu_cblist *srclp, 45 struct rcu_head *rhp) 46 { 47 drclp->head = srclp->head; 48 if (drclp->head) 49 drclp->tail = srclp->tail; 50 else 51 drclp->tail = &drclp->head; 52 drclp->len = srclp->len; 53 if (!rhp) { 54 rcu_cblist_init(srclp); 55 } else { 56 rhp->next = NULL; 57 srclp->head = rhp; 58 srclp->tail = &rhp->next; 59 WRITE_ONCE(srclp->len, 1); 60 } 61 } 62 63 /* 64 * Dequeue the oldest rcu_head structure from the specified callback 65 * list. 66 */ 67 struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp) 68 { 69 struct rcu_head *rhp; 70 71 rhp = rclp->head; 72 if (!rhp) 73 return NULL; 74 rclp->len--; 75 rclp->head = rhp->next; 76 if (!rclp->head) 77 rclp->tail = &rclp->head; 78 return rhp; 79 } 80 81 /* Set the length of an rcu_segcblist structure. */ 82 static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v) 83 { 84 #ifdef CONFIG_RCU_NOCB_CPU 85 atomic_long_set(&rsclp->len, v); 86 #else 87 WRITE_ONCE(rsclp->len, v); 88 #endif 89 } 90 91 /* Get the length of a segment of the rcu_segcblist structure. */ 92 static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg) 93 { 94 return READ_ONCE(rsclp->seglen[seg]); 95 } 96 97 /* Return number of callbacks in segmented callback list by summing seglen. */ 98 long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp) 99 { 100 long len = 0; 101 int i; 102 103 for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) 104 len += rcu_segcblist_get_seglen(rsclp, i); 105 106 return len; 107 } 108 109 /* Set the length of a segment of the rcu_segcblist structure. */ 110 static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v) 111 { 112 WRITE_ONCE(rsclp->seglen[seg], v); 113 } 114 115 /* Increase the numeric length of a segment by a specified amount. */ 116 static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v) 117 { 118 WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v); 119 } 120 121 /* Move from's segment length to to's segment. */ 122 static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to) 123 { 124 long len; 125 126 if (from == to) 127 return; 128 129 len = rcu_segcblist_get_seglen(rsclp, from); 130 if (!len) 131 return; 132 133 rcu_segcblist_add_seglen(rsclp, to, len); 134 rcu_segcblist_set_seglen(rsclp, from, 0); 135 } 136 137 /* Increment segment's length. */ 138 static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg) 139 { 140 rcu_segcblist_add_seglen(rsclp, seg, 1); 141 } 142 143 /* 144 * Increase the numeric length of an rcu_segcblist structure by the 145 * specified amount, which can be negative. This can cause the ->len 146 * field to disagree with the actual number of callbacks on the structure. 147 * This increase is fully ordered with respect to the callers accesses 148 * both before and after. 149 * 150 * So why on earth is a memory barrier required both before and after 151 * the update to the ->len field??? 152 * 153 * The reason is that rcu_barrier() locklessly samples each CPU's ->len 154 * field, and if a given CPU's field is zero, avoids IPIing that CPU. 155 * This can of course race with both queuing and invoking of callbacks. 156 * Failing to correctly handle either of these races could result in 157 * rcu_barrier() failing to IPI a CPU that actually had callbacks queued 158 * which rcu_barrier() was obligated to wait on. And if rcu_barrier() 159 * failed to wait on such a callback, unloading certain kernel modules 160 * would result in calls to functions whose code was no longer present in 161 * the kernel, for but one example. 162 * 163 * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully 164 * ordered with respect with both list modifications and the rcu_barrier(). 165 * 166 * The queuing case is CASE 1 and the invoking case is CASE 2. 167 * 168 * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes 169 * call_rcu() just as CPU 1 invokes rcu_barrier(). CPU 0's ->len field 170 * will transition from 0->1, which is one of the transitions that must 171 * be handled carefully. Without the full memory barriers after the ->len 172 * update and at the beginning of rcu_barrier(), the following could happen: 173 * 174 * CPU 0 CPU 1 175 * 176 * call_rcu(). 177 * rcu_barrier() sees ->len as 0. 178 * set ->len = 1. 179 * rcu_barrier() does nothing. 180 * module is unloaded. 181 * callback invokes unloaded function! 182 * 183 * With the full barriers, any case where rcu_barrier() sees ->len as 0 will 184 * have unambiguously preceded the return from the racing call_rcu(), which 185 * means that this call_rcu() invocation is OK to not wait on. After all, 186 * you are supposed to make sure that any problematic call_rcu() invocations 187 * happen before the rcu_barrier(). 188 * 189 * 190 * CASE 2: Suppose that CPU 0 is invoking its last callback just as 191 * CPU 1 invokes rcu_barrier(). CPU 0's ->len field will transition from 192 * 1->0, which is one of the transitions that must be handled carefully. 193 * Without the full memory barriers before the ->len update and at the 194 * end of rcu_barrier(), the following could happen: 195 * 196 * CPU 0 CPU 1 197 * 198 * start invoking last callback 199 * set ->len = 0 (reordered) 200 * rcu_barrier() sees ->len as 0 201 * rcu_barrier() does nothing. 202 * module is unloaded 203 * callback executing after unloaded! 204 * 205 * With the full barriers, any case where rcu_barrier() sees ->len as 0 206 * will be fully ordered after the completion of the callback function, 207 * so that the module unloading operation is completely safe. 208 * 209 */ 210 void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v) 211 { 212 #ifdef CONFIG_RCU_NOCB_CPU 213 smp_mb__before_atomic(); // Read header comment above. 214 atomic_long_add(v, &rsclp->len); 215 smp_mb__after_atomic(); // Read header comment above. 216 #else 217 smp_mb(); // Read header comment above. 218 WRITE_ONCE(rsclp->len, rsclp->len + v); 219 smp_mb(); // Read header comment above. 220 #endif 221 } 222 223 /* 224 * Increase the numeric length of an rcu_segcblist structure by one. 225 * This can cause the ->len field to disagree with the actual number of 226 * callbacks on the structure. This increase is fully ordered with respect 227 * to the callers accesses both before and after. 228 */ 229 void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp) 230 { 231 rcu_segcblist_add_len(rsclp, 1); 232 } 233 234 /* 235 * Initialize an rcu_segcblist structure. 236 */ 237 void rcu_segcblist_init(struct rcu_segcblist *rsclp) 238 { 239 int i; 240 241 BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); 242 BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); 243 rsclp->head = NULL; 244 for (i = 0; i < RCU_CBLIST_NSEGS; i++) { 245 rsclp->tails[i] = &rsclp->head; 246 rcu_segcblist_set_seglen(rsclp, i, 0); 247 } 248 rcu_segcblist_set_len(rsclp, 0); 249 rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED); 250 } 251 252 /* 253 * Disable the specified rcu_segcblist structure, so that callbacks can 254 * no longer be posted to it. This structure must be empty. 255 */ 256 void rcu_segcblist_disable(struct rcu_segcblist *rsclp) 257 { 258 WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); 259 WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); 260 rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED); 261 } 262 263 /* 264 * Mark the specified rcu_segcblist structure as offloaded. This 265 * structure must be empty. 266 */ 267 void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) 268 { 269 if (offload) { 270 rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); 271 rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); 272 } else { 273 rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); 274 } 275 } 276 277 /* 278 * Does the specified rcu_segcblist structure contain callbacks that 279 * are ready to be invoked? 280 */ 281 bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp) 282 { 283 return rcu_segcblist_is_enabled(rsclp) && 284 &rsclp->head != READ_ONCE(rsclp->tails[RCU_DONE_TAIL]); 285 } 286 287 /* 288 * Does the specified rcu_segcblist structure contain callbacks that 289 * are still pending, that is, not yet ready to be invoked? 290 */ 291 bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) 292 { 293 return rcu_segcblist_is_enabled(rsclp) && 294 !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL); 295 } 296 297 /* 298 * Return a pointer to the first callback in the specified rcu_segcblist 299 * structure. This is useful for diagnostics. 300 */ 301 struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp) 302 { 303 if (rcu_segcblist_is_enabled(rsclp)) 304 return rsclp->head; 305 return NULL; 306 } 307 308 /* 309 * Return a pointer to the first pending callback in the specified 310 * rcu_segcblist structure. This is useful just after posting a given 311 * callback -- if that callback is the first pending callback, then 312 * you cannot rely on someone else having already started up the required 313 * grace period. 314 */ 315 struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) 316 { 317 if (rcu_segcblist_is_enabled(rsclp)) 318 return *rsclp->tails[RCU_DONE_TAIL]; 319 return NULL; 320 } 321 322 /* 323 * Return false if there are no CBs awaiting grace periods, otherwise, 324 * return true and store the nearest waited-upon grace period into *lp. 325 */ 326 bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp) 327 { 328 if (!rcu_segcblist_pend_cbs(rsclp)) 329 return false; 330 *lp = rsclp->gp_seq[RCU_WAIT_TAIL]; 331 return true; 332 } 333 334 /* 335 * Enqueue the specified callback onto the specified rcu_segcblist 336 * structure, updating accounting as needed. Note that the ->len 337 * field may be accessed locklessly, hence the WRITE_ONCE(). 338 * The ->len field is used by rcu_barrier() and friends to determine 339 * if it must post a callback on this structure, and it is OK 340 * for rcu_barrier() to sometimes post callbacks needlessly, but 341 * absolutely not OK for it to ever miss posting a callback. 342 */ 343 void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, 344 struct rcu_head *rhp) 345 { 346 rcu_segcblist_inc_len(rsclp); 347 rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL); 348 rhp->next = NULL; 349 WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp); 350 WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next); 351 } 352 353 /* 354 * Entrain the specified callback onto the specified rcu_segcblist at 355 * the end of the last non-empty segment. If the entire rcu_segcblist 356 * is empty, make no change, but return false. 357 * 358 * This is intended for use by rcu_barrier()-like primitives, -not- 359 * for normal grace-period use. IMPORTANT: The callback you enqueue 360 * will wait for all prior callbacks, NOT necessarily for a grace 361 * period. You have been warned. 362 */ 363 bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, 364 struct rcu_head *rhp) 365 { 366 int i; 367 368 if (rcu_segcblist_n_cbs(rsclp) == 0) 369 return false; 370 rcu_segcblist_inc_len(rsclp); 371 smp_mb(); /* Ensure counts are updated before callback is entrained. */ 372 rhp->next = NULL; 373 for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) 374 if (rsclp->tails[i] != rsclp->tails[i - 1]) 375 break; 376 rcu_segcblist_inc_seglen(rsclp, i); 377 WRITE_ONCE(*rsclp->tails[i], rhp); 378 for (; i <= RCU_NEXT_TAIL; i++) 379 WRITE_ONCE(rsclp->tails[i], &rhp->next); 380 return true; 381 } 382 383 /* 384 * Extract only those callbacks ready to be invoked from the specified 385 * rcu_segcblist structure and place them in the specified rcu_cblist 386 * structure. 387 */ 388 void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, 389 struct rcu_cblist *rclp) 390 { 391 int i; 392 393 if (!rcu_segcblist_ready_cbs(rsclp)) 394 return; /* Nothing to do. */ 395 rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL); 396 *rclp->tail = rsclp->head; 397 WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]); 398 WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL); 399 rclp->tail = rsclp->tails[RCU_DONE_TAIL]; 400 for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) 401 if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) 402 WRITE_ONCE(rsclp->tails[i], &rsclp->head); 403 rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0); 404 } 405 406 /* 407 * Extract only those callbacks still pending (not yet ready to be 408 * invoked) from the specified rcu_segcblist structure and place them in 409 * the specified rcu_cblist structure. Note that this loses information 410 * about any callbacks that might have been partway done waiting for 411 * their grace period. Too bad! They will have to start over. 412 */ 413 void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, 414 struct rcu_cblist *rclp) 415 { 416 int i; 417 418 if (!rcu_segcblist_pend_cbs(rsclp)) 419 return; /* Nothing to do. */ 420 rclp->len = 0; 421 *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; 422 rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; 423 WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL); 424 for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) { 425 rclp->len += rcu_segcblist_get_seglen(rsclp, i); 426 WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]); 427 rcu_segcblist_set_seglen(rsclp, i, 0); 428 } 429 } 430 431 /* 432 * Insert counts from the specified rcu_cblist structure in the 433 * specified rcu_segcblist structure. 434 */ 435 void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, 436 struct rcu_cblist *rclp) 437 { 438 rcu_segcblist_add_len(rsclp, rclp->len); 439 } 440 441 /* 442 * Move callbacks from the specified rcu_cblist to the beginning of the 443 * done-callbacks segment of the specified rcu_segcblist. 444 */ 445 void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, 446 struct rcu_cblist *rclp) 447 { 448 int i; 449 450 if (!rclp->head) 451 return; /* No callbacks to move. */ 452 rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len); 453 *rclp->tail = rsclp->head; 454 WRITE_ONCE(rsclp->head, rclp->head); 455 for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) 456 if (&rsclp->head == rsclp->tails[i]) 457 WRITE_ONCE(rsclp->tails[i], rclp->tail); 458 else 459 break; 460 rclp->head = NULL; 461 rclp->tail = &rclp->head; 462 } 463 464 /* 465 * Move callbacks from the specified rcu_cblist to the end of the 466 * new-callbacks segment of the specified rcu_segcblist. 467 */ 468 void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, 469 struct rcu_cblist *rclp) 470 { 471 if (!rclp->head) 472 return; /* Nothing to do. */ 473 474 rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len); 475 WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head); 476 WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail); 477 } 478 479 /* 480 * Advance the callbacks in the specified rcu_segcblist structure based 481 * on the current value passed in for the grace-period counter. 482 */ 483 void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq) 484 { 485 int i, j; 486 487 WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); 488 if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) 489 return; 490 491 /* 492 * Find all callbacks whose ->gp_seq numbers indicate that they 493 * are ready to invoke, and put them into the RCU_DONE_TAIL segment. 494 */ 495 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { 496 if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) 497 break; 498 WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]); 499 rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL); 500 } 501 502 /* If no callbacks moved, nothing more need be done. */ 503 if (i == RCU_WAIT_TAIL) 504 return; 505 506 /* Clean up tail pointers that might have been misordered above. */ 507 for (j = RCU_WAIT_TAIL; j < i; j++) 508 WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]); 509 510 /* 511 * Callbacks moved, so clean up the misordered ->tails[] pointers 512 * that now point into the middle of the list of ready-to-invoke 513 * callbacks. The overall effect is to copy down the later pointers 514 * into the gap that was created by the now-ready segments. 515 */ 516 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { 517 if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) 518 break; /* No more callbacks. */ 519 WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]); 520 rcu_segcblist_move_seglen(rsclp, i, j); 521 rsclp->gp_seq[j] = rsclp->gp_seq[i]; 522 } 523 } 524 525 /* 526 * "Accelerate" callbacks based on more-accurate grace-period information. 527 * The reason for this is that RCU does not synchronize the beginnings and 528 * ends of grace periods, and that callbacks are posted locally. This in 529 * turn means that the callbacks must be labelled conservatively early 530 * on, as getting exact information would degrade both performance and 531 * scalability. When more accurate grace-period information becomes 532 * available, previously posted callbacks can be "accelerated", marking 533 * them to complete at the end of the earlier grace period. 534 * 535 * This function operates on an rcu_segcblist structure, and also the 536 * grace-period sequence number seq at which new callbacks would become 537 * ready to invoke. Returns true if there are callbacks that won't be 538 * ready to invoke until seq, false otherwise. 539 */ 540 bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq) 541 { 542 int i, j; 543 544 WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); 545 if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) 546 return false; 547 548 /* 549 * Find the segment preceding the oldest segment of callbacks 550 * whose ->gp_seq[] completion is at or after that passed in via 551 * "seq", skipping any empty segments. This oldest segment, along 552 * with any later segments, can be merged in with any newly arrived 553 * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq" 554 * as their ->gp_seq[] grace-period completion sequence number. 555 */ 556 for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--) 557 if (rsclp->tails[i] != rsclp->tails[i - 1] && 558 ULONG_CMP_LT(rsclp->gp_seq[i], seq)) 559 break; 560 561 /* 562 * If all the segments contain callbacks that correspond to 563 * earlier grace-period sequence numbers than "seq", leave. 564 * Assuming that the rcu_segcblist structure has enough 565 * segments in its arrays, this can only happen if some of 566 * the non-done segments contain callbacks that really are 567 * ready to invoke. This situation will get straightened 568 * out by the next call to rcu_segcblist_advance(). 569 * 570 * Also advance to the oldest segment of callbacks whose 571 * ->gp_seq[] completion is at or after that passed in via "seq", 572 * skipping any empty segments. 573 * 574 * Note that segment "i" (and any lower-numbered segments 575 * containing older callbacks) will be unaffected, and their 576 * grace-period numbers remain unchanged. For example, if i == 577 * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched. 578 * Instead, the CBs in NEXT_TAIL will be merged with those in 579 * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL 580 * would be updated. NEXT_TAIL would then be empty. 581 */ 582 if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL) 583 return false; 584 585 /* Accounting: everything below i is about to get merged into i. */ 586 for (j = i + 1; j <= RCU_NEXT_TAIL; j++) 587 rcu_segcblist_move_seglen(rsclp, j, i); 588 589 /* 590 * Merge all later callbacks, including newly arrived callbacks, 591 * into the segment located by the for-loop above. Assign "seq" 592 * as the ->gp_seq[] value in order to correctly handle the case 593 * where there were no pending callbacks in the rcu_segcblist 594 * structure other than in the RCU_NEXT_TAIL segment. 595 */ 596 for (; i < RCU_NEXT_TAIL; i++) { 597 WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_NEXT_TAIL]); 598 rsclp->gp_seq[i] = seq; 599 } 600 return true; 601 } 602 603 /* 604 * Merge the source rcu_segcblist structure into the destination 605 * rcu_segcblist structure, then initialize the source. Any pending 606 * callbacks from the source get to start over. It is best to 607 * advance and accelerate both the destination and the source 608 * before merging. 609 */ 610 void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp, 611 struct rcu_segcblist *src_rsclp) 612 { 613 struct rcu_cblist donecbs; 614 struct rcu_cblist pendcbs; 615 616 lockdep_assert_cpus_held(); 617 618 rcu_cblist_init(&donecbs); 619 rcu_cblist_init(&pendcbs); 620 621 rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs); 622 rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs); 623 624 /* 625 * No need smp_mb() before setting length to 0, because CPU hotplug 626 * lock excludes rcu_barrier. 627 */ 628 rcu_segcblist_set_len(src_rsclp, 0); 629 630 rcu_segcblist_insert_count(dst_rsclp, &donecbs); 631 rcu_segcblist_insert_count(dst_rsclp, &pendcbs); 632 rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs); 633 rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs); 634 635 rcu_segcblist_init(src_rsclp); 636 } 637