/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/* If all offline, queue the work on an unbound CPU. */
	if (unlikely(cpu > rnp->grphi - rnp->grplo))
		cpu = WORK_CPU_UNBOUND;
	else
		cpu += rnp->grplo;
	queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
	destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_gp_par_worker_started() ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				preempt_disable();
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
				preempt_enable();
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
					"D."[!!(rdp->cpu_no_qs.b.exp)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
		panic_on_rcu_stall();
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		if (rcu_init_invoked())
			cond_resched();
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshal arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
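
/*
 * Illustrative sketch (not part of this file's implementation): the
 * kerneldoc above advises batching updates rather than invoking
 * synchronize_rcu_expedited() in a loop.  The names gp[], newp[],
 * oldp[], and mylock below are hypothetical caller-side variables,
 * not anything defined by RCU.
 *
 *	// Poor: one expedited grace period per updated pointer.
 *	for (i = 0; i < n; i++) {
 *		oldp[i] = rcu_dereference_protected(gp[i],
 *						    lockdep_is_held(&mylock));
 *		rcu_assign_pointer(gp[i], newp[i]);	// publish replacement
 *		synchronize_rcu_expedited();		// wait for readers
 *		kfree(oldp[i]);				// then reclaim
 *	}
 *
 *	// Better: publish all updates, then wait for a single grace period.
 *	for (i = 0; i < n; i++) {
 *		oldp[i] = rcu_dereference_protected(gp[i],
 *						    lockdep_is_held(&mylock));
 *		rcu_assign_pointer(gp[i], newp[i]);
 *	}
 *	synchronize_rcu();				// one grace period total
 *	for (i = 0; i < n; i++)
 *		kfree(oldp[i]);
 */
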
/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		rnp->exp_seq_poll_rq = s;
		if (rcu_init_invoked())
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
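
/*
 * Illustrative sketch (not part of this file's implementation): one way
 * a caller might combine the polled expedited API above so that the
 * expedited grace period overlaps with other work.  The lock my_lock,
 * the RCU-protected pointer my_gp, struct foo, and do_other_work() are
 * hypothetical caller-side names.
 *
 *	struct foo *old;
 *	unsigned long cookie;
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_gp, newp);	// publish the replacement
 *	cookie = start_poll_synchronize_rcu_expedited();  // start expedited GP
 *	spin_unlock(&my_lock);
 *
 *	do_other_work();			// overlap the grace period
 *
 *	cond_synchronize_rcu_expedited(cookie);	// wait only if GP not yet done
 *	kfree(old);				// now safe to reclaim
 */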