/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
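		/*
		 * Walk up toward the root, ORing this leaf's ->grpmask bit
		 * into each ancestor's ->expmaskinit.  Stop at the first
		 * ancestor whose ->expmaskinit was already nonzero: the
		 * levels above it were filled in by an earlier CPU-online
		 * event.
		 */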
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work,
	 * and otherwise falling through to acquire ->exp_mutex.  The
	 * mapping from CPU to rcu_node structure can be inexact, as it
	 * is just promoting locality and is not strictly needed for
	 * correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Kthread-worker handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/* If all offline, queue the work on an unbound CPU. */
	if (unlikely(cpu > rnp->grphi - rnp->grplo))
		cpu = WORK_CPU_UNBOUND;
	else
		cpu += rnp->grplo;
	queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
	destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_gp_par_worker_started() ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0); /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				preempt_disable();
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
				preempt_enable();
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
					"D."[!!(rdp->cpu_no_qs.b.exp)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
				dump_cpu_task(cpu);
				preempt_enable();
			}
			rcu_exp_print_detail_task_stall_rnp(rnp);
		}
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
		panic_on_rcu_stall();
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() report the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If preemption and softirqs are also
	 * enabled, or if this CPU is idle, immediately report the
	 * quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->cpu_no_qs.b.exp
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
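	/*
	 * The zero final argument makes this a fire-and-forget IPI:
	 * there is no need to wait here, because the target CPU will
	 * run rcu_exp_handler() and report its own quiescent state.
	 */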
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu_hurry);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		rnp->exp_seq_poll_rq = s;
		if (rcu_init_invoked())
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);
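/*
 * Illustrative usage sketch (not part of this file): the polled expedited
 * API above lets a caller overlap an expedited grace period with other
 * work.  The helpers my_prepare_update() and my_finish_update() below are
 * hypothetical placeholders.
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu_expedited();
 *	my_prepare_update();			// Other work overlaps the grace period.
 *	cond_synchronize_rcu_expedited(cookie);	// Waits only if that grace period has not yet ended.
 *	my_finish_update();			// All readers that began before the cookie have completed.
 */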