/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
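 *
 * After folding in any recent CPU-hotplug activity via
 * sync_exp_reset_tree_hotplug(), each rcu_node structure's ->expmask
 * is (re)initialized from its ->expmaskinit value.  The ->expmask
 * fields must be zero on entry because no expedited grace period may
 * be in progress when this function is called.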
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the
 * lock itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
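 *
 * Bits in @mask that are already clear in ->expmask have already been
 * reported, so if none of the requested bits remain set, there is
 * nothing to do.  Otherwise, the remaining bits are cleared and the
 * result is propagated up the tree by __rcu_report_exp_rnp().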
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work,
	 * and otherwise falling through to acquire ->exp_mutex.  The
	 * mapping from CPU to rcu_node structure can be inexact, as it
	 * is just promoting locality and is not strictly needed for
	 * correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
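 *
 * This runs once per leaf rcu_node structure, either directly or as a
 * workqueue handler.  CPUs that are offline, idle, or running this very
 * code are excused from the IPI and have their quiescent states reported
 * on their behalf; the remaining CPUs are IPIed via rcu_exp_handler().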
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
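			/*
			 * Whether invoked directly here or later from the
			 * workqueue, sync_rcu_exp_select_node_cpus() recovers
			 * the rcu_node structure from its &rnp->rew.rew_work
			 * argument via container_of().
			 */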
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

static void synchronize_sched_expedited_wait(void)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	int ret;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout_exclusive(
				rcu_state.expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait();
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
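	 *
	 * The ->exp_wake_mutex serializes this wakeup phase against the
	 * wakeup phase of the next expedited grace period, and the
	 * ->exp_seq_rq updates below keep stragglers in exp_funnel_lock()
	 * from waiting on sequence numbers that have already wrapped.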
	 */
	mutex_lock(&rcu_state.exp_wake_mutex);

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If the interrupted code also had neither
	 * preemption nor bottom halves disabled, or if the CPU is idle,
	 * immediately report the quiescent state, otherwise defer.
	 */
	if (!t->rcu_read_lock_nesting) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->deferred_qs = true;
			WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/*
	 * The final and least likely case is where the interrupted
	 * code was just about to or just finished exiting the RCU-preempt
	 * read-side critical section, and no, we can't tell which.
	 * So either way, set ->deferred_qs to flag later code that
	 * a quiescent state is required.
	 *
	 * If the CPU is fully enabled (or if some buggy RCU-preempt
	 * read-side critical section is being used from idle), just
	 * invoke rcu_preempt_deferred_qs() to immediately report the
	 * quiescent state.  We cannot use rcu_read_unlock_special()
	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
	rdp->deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
	} else {
		set_tsk_need_resched(t);
		set_preempt_need_resched();
	}
}

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	WARN_ON_ONCE(ret);
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU read-side critical section, and if so, it sets a
 * flag that causes the outermost rcu_read_unlock() to report the
 * quiescent state for RCU-preempt or asks the scheduler for help for
 * RCU-sched.  On the other hand, if the CPU is not in an RCU read-side
 * critical section, the IPI handler reports the quiescent state
 * immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
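 *
 * As a purely illustrative sketch (the structure p and its ->list field
 * are made up and not part of this file), a writer might unpublish an
 * element and wait for all pre-existing readers before freeing it:
 *
 *	list_del_rcu(&p->list);
 *	synchronize_rcu_expedited();
 *	kfree(p);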
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);