/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity. Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online. This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = READ_ONCE(rsp->ncpus);
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period. Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period. This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree. (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node. This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure. Caller must hold the rcu_state's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
			       unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		atomic_long_inc(stat);
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods. Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held. Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex. The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[(s >> 1) & 0x3],
				   sync_exp_work_done(rsp,
						      &rdp->exp_workdone2, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}
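/*
 * A rough sketch of how exp_funnel_lock() above behaves under load:
 * each task computes the sequence number "s" that ->expedited_sequence
 * must reach to satisfy it, then walks up the rcu_node tree. At each
 * level it either finds that a sufficiently new grace period has already
 * been requested (->exp_seq_rq >= s), in which case it sleeps on one of
 * the four ->exp_wq[] wait queues, or it records "s" in ->exp_seq_rq and
 * moves up, eventually acquiring ->exp_mutex and driving the grace
 * period itself. The wait-queue index (s >> 1) & 0x3 drops the bottom
 * grace-period-in-progress bit of the sequence number and spreads
 * waiters for successive grace periods across the four queues, so that
 * the end-of-grace-period wakeup need not disturb tasks waiting on a
 * later grace period.
 */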
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			rdp->exp_dynticks_snap =
				rcu_dynticks_snap(rdp->dynticks);
			if (raw_smp_processor_id() == cpu ||
			    rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
			    !(rnp->qsmaskinitnext & rdp->grpmask))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well. Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			if (rcu_dynticks_in_eqs_since(rdp->dynticks,
						      rdp->exp_dynticks_snap)) {
				mask_ofl_test |= mask;
				continue;
			}
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
				continue;
			}
			/* Failed, raced with CPU hotplug operation. */
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			if ((rnp->qsmaskinitnext & mask) &&
			    (rnp->expmask & mask)) {
				/* Online, so delay for a bit and try again. */
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				schedule_timeout_uninterruptible(1);
				goto retry_ipi;
			}
			/* CPU really is offline, so we can ignore it. */
			if (!(rnp->expmask & mask))
				mask_ofl_ipi &= ~mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		/* Report quiescent states for those that went offline. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}

static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period. Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}
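/*
 * Note the division of labor between the two mutexes used above:
 * ->exp_mutex, acquired in exp_funnel_lock() and released by the task
 * that initiated the grace period, serializes the expedited grace
 * periods themselves, while ->exp_wake_mutex serializes only the wakeup
 * phase. Because rcu_exp_gp_seq_end() runs before the wakeups, the
 * initiating task can be awakened and drop ->exp_mutex while the loop
 * above is still issuing wakeups, allowing the next expedited grace
 * period to get under way; that next grace period's own wakeup phase
 * will in turn wait here for these wakeups to finish.
 */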
/* Let the workqueue handler know what it is supposed to do. */
struct rcu_exp_work {
	smp_call_func_t rew_func;
	struct rcu_state *rew_rsp;
	unsigned long rew_s;
	struct work_struct rew_work;
};

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
				  smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(rsp, func, s);
	} else {
		/* Marshal arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_rsp = rsp;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		schedule_work(&rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[(s >> 1) & 0x3],
		   sync_exp_work_done(rsp,
				      &rdp->exp_workdone0, s));

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly. This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code. In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single(). If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree. Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report. Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started. Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it. The basic
 * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state. On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code. In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Switch to run-time mode once Tree RCU has fully initialized.
 */
static int __init rcu_exp_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_exp_runtime_mode);
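/*
 * Usage sketch for the advice in the kernel-doc comments above; the
 * remove_element() and free_element() helpers are purely hypothetical.
 * Instead of expediting every update:
 *
 *	for (i = 0; i < n; i++) {
 *		remove_element(elem[i]);
 *		synchronize_rcu_expedited();
 *		free_element(elem[i]);
 *	}
 *
 * batch the removals and wait once, preferably without expediting:
 *
 *	for (i = 0; i < n; i++)
 *		remove_element(elem[i]);
 *	synchronize_rcu();
 *	for (i = 0; i < n; i++)
 *		free_element(elem[i]);
 *
 * This way a single grace period covers all of the removals.
 */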