/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/lockdep.h>

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

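/*
 * Illustrative sketch (not part of the original file): the snapshot/done
 * pair above implements a "has a full expedited grace period elapsed
 * since I looked?" test, which later code uses to piggy-back on grace
 * periods driven by other tasks.  The helper name below is hypothetical;
 * the real users are sync_exp_work_done() and exp_funnel_lock() later
 * in this file.
 */
static __maybe_unused bool rcu_exp_example_gp_elapsed(void)
{
	unsigned long s = rcu_exp_gp_seq_snap(); /* "Wait for everything up to here." */

	/* A real caller would block, retry, or do other work at this point. */

	return rcu_exp_gp_seq_done(s); /* True iff a full expedited GP has since completed. */
}
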
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the
 * lock itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work,
	 * or otherwise falling through to acquire ->exp_mutex.  The
	 * mapping from CPU to rcu_node structure can be inexact, as it
	 * is just promoting locality and is not strictly needed for
	 * correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	smp_call_func_t func;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	func = rewp->rew_func;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, func, NULL, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(smp_call_func_t func)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		rnp->rew.rew_func = func;
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		preempt_disable();
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		preempt_enable();
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

static void synchronize_sched_expedited_wait(void)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	int ret;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout_exclusive(
				rcu_state.expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0); /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait();
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rcu_state.exp_wake_mutex);

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
}

/*
 * Given a smp_call_function() handler, kick off the specified
 * implementation of the expedited grace period.
 */
static void _synchronize_rcu_expedited(smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(func, s);
	} else {
		/* Marshal arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!t->rcu_read_lock_nesting) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->deferred_qs = true;
			WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/*
	 * The final and least likely case is where the interrupted
	 * code was just about to or just finished exiting the RCU-preempt
	 * read-side critical section, and no, we can't tell which.
	 * So either way, set ->deferred_qs to flag later code that
	 * a quiescent state is required.
	 *
	 * If the CPU is fully enabled (or if some buggy RCU-preempt
	 * read-side critical section is being used from idle), just
	 * invoke rcu_preempt_deferred_qs() to immediately report the
	 * quiescent state.  We cannot use rcu_read_unlock_special()
	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
	rdp->deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
	} else {
		set_tsk_need_resched(t);
		set_preempt_need_resched();
	}
}

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and
 * is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_rcu_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_rcu() instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *unused)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Because a context switch is a grace period for !PREEMPT, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during execution
 * of either synchronize_rcu() or synchronize_rcu_expedited().  It is
 * OK to occasionally incorrectly indicate that there are multiple CPUs
 * online when there was in fact only one the whole time, as this just
 * adds some overhead: RCU still operates correctly.
 */
static int rcu_blocking_is_gp(void)
{
	int ret;

	might_sleep();  /* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}

/* PREEMPT=n implementation of synchronize_rcu_expedited(). */
void synchronize_rcu_expedited(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
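
/*
 * Illustrative sketch (not part of the original file): the kernel-doc for
 * synchronize_rcu_expedited() above advises callers that loop over updates
 * to batch them behind a single grace period rather than expediting one
 * grace period per update.  The helper below is hypothetical and is shown
 * only to make that pattern concrete: the caller first unpublishes every
 * pointer in "batch", then a single grace period covers all of them before
 * they are freed via the caller-supplied callback.
 */
static __maybe_unused void rcu_exp_example_batch_free(void **batch, int n,
						      void (*freefn)(void *))
{
	int i;

	/* One grace period for the whole already-unpublished batch, not one per element. */
	synchronize_rcu();
	for (i = 0; i < n; i++)
		freefn(batch[i]);
}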