/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/sched/debug.h>
#include <linux/smpboot.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

/*
 * Control variables for per-CPU and per-rcu_node kthreads.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
 * all uses are in dead code.  Provide a definition to keep the compiler
 * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
 * This probably needs to be excluded from -rt builds.
 */
#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1
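/*
 * Illustrative sketch (editorial addition, not part of the kernel
 * build): how the four flags above combine into a ->blkd_tasks
 * decision-table state.  The particular state chosen here is
 * hypothetical.
 *
 *	int blkd_state = RCU_GP_TASKS + RCU_EXP_BLKD;	// 0x8 + 0x1 = 0x9
 *
 * This state means that a normal grace period is already waiting on
 * some queued task (RCU_GP_TASKS) and that the newly preempted task
 * blocks the current expedited grace period (RCU_EXP_BLKD), so
 * rcu_preempt_ctxt_queue() below queues the task at the tail of
 * ->blkd_tasks.
 */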
/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods.  The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none).  If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements.  In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period.  Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves interrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case                RCU_EXP_TASKS:
	case                RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                                              RCU_EXP_BLKD:
	case                                RCU_GP_BLKD:
	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting.  Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP.  Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS +                 RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP.  Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		rnp->gp_tasks = &t->rcu_node_entry;
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		rnp->exp_tasks = &t->rcu_node_entry;
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->deferred_qs);
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state:  Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU.  It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */
		current->rcu_read_unlock_special.b.need_qs = false;
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.
 * If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing normal and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special.s) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
		rcu_preempt_deferred_qs(t);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->deferred_qs)
		rcu_report_exp_rdp(rdp);
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
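/*
 * Usage sketch (editorial addition, not part of this file): a nested
 * preemptible-RCU reader.  The pointers p and gp are hypothetical.
 * Each rcu_read_lock() below maps to __rcu_read_lock() above and
 * merely increments the nesting count; shared state changes only if
 * the task is preempted within the critical section.
 *
 *	rcu_read_lock();		// ->rcu_read_lock_nesting == 1
 *	rcu_read_lock();		// ->rcu_read_lock_nesting == 2
 *	p = rcu_dereference(gp);	// protected pointer fetch
 *	rcu_read_unlock();		// nesting back to 1, fast path
 *	rcu_read_unlock();		// outermost: may invoke
 *					// rcu_read_unlock_special()
 */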
/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning NULL
 * instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states.  The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->deferred_qs) {
		local_irq_restore(flags);
		return;
	}
	if (special.b.need_qs) {
		rcu_qs();
		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
			local_irq_restore(flags);
			return;
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU.  Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->deferred_qs) {
		rcu_report_exp_rdp(rdp);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {
		t->rcu_read_unlock_special.b.blocked = false;

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop.
		 * Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				rnp->boost_tasks = np;
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section?  It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states.  The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled.  This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (this_cpu_ptr(&rcu_data)->deferred_qs ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       t->rcu_read_lock_nesting <= 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section.  The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
static void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;
	bool couldrecurse = t->rcu_read_lock_nesting >= 0;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	if (couldrecurse)
		t->rcu_read_lock_nesting -= INT_MIN;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
	if (couldrecurse)
		t->rcu_read_lock_nesting += INT_MIN;
}
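/*
 * Worked example (editorial addition) of the INT_MIN manipulation
 * above, assuming the kernel's wrapping signed arithmetic
 * (-fno-strict-overflow): entering with ->rcu_read_lock_nesting == 0
 * gives couldrecurse == true, and 0 - INT_MIN wraps to INT_MIN.  Any
 * RCU read-side critical section entered from within the irqrestore
 * path then sees a nesting count far from 1, so __rcu_read_unlock()
 * takes its fast path and cannot re-enter rcu_read_unlock_special().
 * Adding INT_MIN back afterwards wraps the count to its original
 * value of 0.
 */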
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
	bool irqs_were_disabled;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if ((preempt_bh_were_disabled || irqs_were_disabled) &&
	    t->rcu_read_unlock_special.b.blocked) {
		/* Need to defer quiescent state until everything is enabled. */
		raise_softirq_irqoff(RCU_SOFTIRQ);
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(void)
{
	struct rcu_node *rnp = rcu_get_root();

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rnp->exp_tasks)
		return 0;
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		rnp->gp_tasks = rnp->blkd_tasks.next;
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_flavor_check_callbacks(int user)
{
	struct task_struct *t = current;

	if (user || rcu_is_cpu_rrupt_from_idle()) {
		rcu_note_voluntary_context_switch(current);
	}
	if (t->rcu_read_lock_nesting > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!t->rcu_read_lock_nesting) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (t->rcu_read_lock_nesting > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 * In addition, regions of code across which interrupts, preemption, or
 * softirqs have been disabled also serve as RCU read-side critical
 * sections.  This includes hardware interrupt handlers, softirq handlers,
 * and NMI handlers.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last RCU read-side critical section whose beginning
 * preceded the call to synchronize_rcu().  In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_rcu() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_rcu() and before the beginning of
 * that RCU read-side critical section.  Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special.b.blocked = true;
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}
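/*
 * Update-side usage sketch (editorial addition, not part of this
 * file): the classic unpublish-then-free pattern built on the
 * synchronize_rcu() primitive above.  The names gp, gp_lock, new_p,
 * and old_p are hypothetical.
 *
 *	old_p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new_p);	// unpublish old_p
 *	synchronize_rcu();		// wait for pre-existing readers
 *	kfree(old_p);			// no reader can still hold old_p
 */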
/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	bool onl;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)rnp->gp_seq, (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= 10)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
	}
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPT=n.  Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag.  The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	rcu_qs();
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

/*
 * Note a PREEMPT=n context switch.  The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	if (!preempt)
		rcu_tasks_qs(current);
out:
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static void rcu_preempt_deferred_qs(struct task_struct *t) { }

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(void)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context.  It is normally
 * invoked from the scheduling-clock interrupt.
 */
static void rcu_flavor_check_callbacks(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */

		rcu_qs();
	}
}

/* PREEMPT=n implementation of synchronize_rcu(). */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct task_struct *t;

	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
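/*
 * Worked example (editorial addition): with HZ=1000 and
 * CONFIG_RCU_BOOST_DELAY=500, RCU_BOOST_DELAY_JIFFIES is
 * DIV_ROUND_UP(500 * 1000, 1000) == 500 jiffies, so boosting cannot
 * begin until half a second into a grace period.  With HZ=250, the
 * same 500 ms delay works out to DIV_ROUND_UP(500 * 250, 1000) == 125
 * jiffies.
 */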
1422 * 1423 * We don't include outgoingcpu in the affinity set, use -1 if there is 1424 * no outgoing CPU. If there are no CPUs left in the affinity set, 1425 * this function allows the kthread to execute on any CPU. 1426 */ 1427 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) 1428 { 1429 struct task_struct *t = rnp->boost_kthread_task; 1430 unsigned long mask = rcu_rnp_online_cpus(rnp); 1431 cpumask_var_t cm; 1432 int cpu; 1433 1434 if (!t) 1435 return; 1436 if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) 1437 return; 1438 for_each_leaf_node_possible_cpu(rnp, cpu) 1439 if ((mask & leaf_node_cpu_bit(rnp, cpu)) && 1440 cpu != outgoingcpu) 1441 cpumask_set_cpu(cpu, cm); 1442 if (cpumask_weight(cm) == 0) 1443 cpumask_setall(cm); 1444 set_cpus_allowed_ptr(t, cm); 1445 free_cpumask_var(cm); 1446 } 1447 1448 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 1449 .store = &rcu_cpu_kthread_task, 1450 .thread_should_run = rcu_cpu_kthread_should_run, 1451 .thread_fn = rcu_cpu_kthread, 1452 .thread_comm = "rcuc/%u", 1453 .setup = rcu_cpu_kthread_setup, 1454 .park = rcu_cpu_kthread_park, 1455 }; 1456 1457 /* 1458 * Spawn boost kthreads -- called as soon as the scheduler is running. 1459 */ 1460 static void __init rcu_spawn_boost_kthreads(void) 1461 { 1462 struct rcu_node *rnp; 1463 int cpu; 1464 1465 for_each_possible_cpu(cpu) 1466 per_cpu(rcu_cpu_has_work, cpu) = 0; 1467 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); 1468 rcu_for_each_leaf_node(rnp) 1469 (void)rcu_spawn_one_boost_kthread(rnp); 1470 } 1471 1472 static void rcu_prepare_kthreads(int cpu) 1473 { 1474 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 1475 struct rcu_node *rnp = rdp->mynode; 1476 1477 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ 1478 if (rcu_scheduler_fully_active) 1479 (void)rcu_spawn_one_boost_kthread(rnp); 1480 } 1481 1482 #else /* #ifdef CONFIG_RCU_BOOST */ 1483 1484 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1485 __releases(rnp->lock) 1486 { 1487 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1488 } 1489 1490 static void invoke_rcu_callbacks_kthread(void) 1491 { 1492 WARN_ON_ONCE(1); 1493 } 1494 1495 static bool rcu_is_callbacks_kthread(void) 1496 { 1497 return false; 1498 } 1499 1500 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1501 { 1502 } 1503 1504 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) 1505 { 1506 } 1507 1508 static void __init rcu_spawn_boost_kthreads(void) 1509 { 1510 } 1511 1512 static void rcu_prepare_kthreads(int cpu) 1513 { 1514 } 1515 1516 #endif /* #else #ifdef CONFIG_RCU_BOOST */ 1517 1518 #if !defined(CONFIG_RCU_FAST_NO_HZ) 1519 1520 /* 1521 * Check to see if any future RCU-related work will need to be done 1522 * by the current CPU, even if none need be done immediately, returning 1523 * 1 if so. This function is part of the RCU implementation; it is -not- 1524 * an exported member of the RCU API. 1525 * 1526 * Because we not have RCU_FAST_NO_HZ, just check whether or not this 1527 * CPU has RCU callbacks queued. 1528 */ 1529 int rcu_needs_cpu(u64 basemono, u64 *nextevt) 1530 { 1531 *nextevt = KTIME_MAX; 1532 return rcu_cpu_has_callbacks(NULL); 1533 } 1534 1535 /* 1536 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up 1537 * after it. 1538 */ 1539 static void rcu_cleanup_after_idle(void) 1540 { 1541 } 1542 1543 /* 1544 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n, 1545 * is nothing. 
 */
static void rcu_prepare_for_idle(void)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following two preprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);
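/*
 * Tuning sketch (editorial addition): the 0644 module_param()
 * permissions above expose both delays at runtime.  Assuming these
 * parameters land in the rcutree namespace, as kernel-parameters.txt
 * documents for this file's parameters, they can be set via sysfs:
 *
 *	# echo 8 > /sys/module/rcutree/parameters/rcu_idle_gp_delay
 *
 * or on the boot command line with rcutree.rcu_idle_gp_delay=8.
 */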

/*
 * Try to advance callbacks on the current CPU, but only if it has been
 * awhile since the last time we did so.  Afterwards, if there are any
 * callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdp->last_advance_all)
		return false;
	rdp->last_advance_all = jiffies;

	rnp = rdp->mynode;

	/*
	 * Don't bother checking unless a grace period has
	 * completed since we last checked and there are
	 * callbacks not yet ready to invoke.
	 */
	if ((rcu_seq_completed_gp(rdp->gp_seq,
				  rcu_seq_current(&rnp->gp_seq)) ||
	     unlikely(READ_ONCE(rdp->gpwrap))) &&
	    rcu_segcblist_pend_cbs(&rdp->cblist))
		note_gp_changes(rdp);

	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		cbs_ready = true;
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	unsigned long dj;

	lockdep_assert_irqs_disabled();

	/* Snapshot to detect later posting of non-lazy callback. */
	rdp->nonlazy_posted_snap = rdp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdp->all_lazy) {
		dj = round_up(rcu_idle_gp_delay + jiffies,
			      rcu_idle_gp_delay) - jiffies;
	} else {
		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	*nextevt = basemono + dj * TICK_NSEC;
	return 0;
}
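
/*
 * The round_up() above aligns the non-lazy wakeup on an
 * rcu_idle_gp_delay boundary, so idle CPUs tend to wake at the same
 * jiffy and their timers can be batched.  For example, with the
 * default rcu_idle_gp_delay of 4 and jiffies == 1003:
 *
 *	dj = round_up(4 + 1003, 4) - 1003 = 1008 - 1003 = 5
 *
 * so every such CPU targets jiffy 1008 rather than five scattered
 * instants.  CPUs with only lazy callbacks instead use round_jiffies()
 * on a delay of roughly six seconds (rcu_idle_lazy_gp_delay).
 */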

/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(void)
{
	bool needwake;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;
	int tne;

	lockdep_assert_irqs_disabled();
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;

	/* Handle nohz enablement switches conservatively. */
	tne = READ_ONCE(tick_nohz_active);
	if (tne != rdp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(NULL))
			invoke_rcu_core(); /* force nohz to see update. */
		rdp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdp->all_lazy &&
	    rdp->nonlazy_posted != rdp->nonlazy_posted_snap) {
		rdp->all_lazy = false;
		rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdp->last_accelerate == jiffies)
		return;
	rdp->last_accelerate = jiffies;
	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		needwake = rcu_accelerate_cbs(rnp, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
		if (needwake)
			rcu_gp_kthread_wake();
	}
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(void)
{
	lockdep_assert_irqs_disabled();
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_data.nonlazy_posted, 1);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap;

	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		ulong2long(nlpd),
		rdp->all_lazy ? 'L' : '.',
		rdp->tick_nohz_enabled_snap ? '.' : 'D');
}
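
/*
 * For example, a stall-warning line ending in
 *
 *	last_accelerate: 3c8c/3d10, nonlazy_posted: 25, .D
 *
 * decodes as: this CPU last accelerated callbacks at jiffies 0x3c8c
 * (low-order 16 bits), the current jiffies counter reads 0x3d10,
 * 25 non-lazy callbacks arrived since the last idle-entry snapshot,
 * not all queued callbacks are lazy ('.' rather than 'L'), and nohz
 * was disabled when last sampled ('D' rather than '.').
 */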

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	pr_cont("\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz);
}

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	pr_err("\t");
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

#ifdef CONFIG_RCU_NOCB_CPU

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For each CPU in the set, a kthread is
 * created that pulls the callbacks from the corresponding CPU, waits
 * for a grace period to elapse, and invokes the callbacks.  The no-CBs
 * CPUs do a wake_up() on their kthread when they insert a callback into
 * any empty list, unless the rcu_nocb_poll boot parameter has been
 * specified, in which case each kthread actively polls its CPU.  (Which
 * isn't so great for energy efficiency, but which does reduce RCU's
 * overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callback processing could also in theory be used as
 * an energy-efficiency measure because CPUs with no RCU callbacks
 * queued are more aggressive about entering dyntick-idle mode.
 */

/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	cpulist_parse(str, rcu_nocb_mask);
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
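
/*
 * For example, booting with
 *
 *	rcu_nocbs=1-7
 *
 * offloads callback invocation for CPUs 1-7 to rcuo kthreads, and
 * additionally passing
 *
 *	rcu_nocb_poll
 *
 * makes those kthreads poll for work, relieving the offloaded CPUs
 * of even wakeup duty (at the energy-efficiency cost noted above).
 */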

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (cpumask_available(rcu_nocb_mask))
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}

/*
 * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
 * and this function releases it.
 */
static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
			       unsigned long flags)
	__releases(rdp->nocb_lock)
{
	struct rcu_data *rdp_leader = rdp->nocb_leader;

	lockdep_assert_held(&rdp->nocb_lock);
	if (!READ_ONCE(rdp_leader->nocb_kthread)) {
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		return;
	}
	if (rdp_leader->nocb_leader_sleep || force) {
		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
		del_timer(&rdp->nocb_timer);
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
		swake_up_one(&rdp_leader->nocb_wq);
	} else {
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	}
}

/*
 * Kick the leader kthread for this NOCB group, for callers that do
 * not already hold ->nocb_lock.
 */
static void wake_nocb_leader(struct rcu_data *rdp, bool force)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	__wake_nocb_leader(rdp, force, flags);
}

/*
 * Arrange to wake the leader kthread for this NOCB group at some
 * future time when it is safe to do so.
 */
static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
				   const char *reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
		mod_timer(&rdp->nocb_timer, jiffies + 1);
	WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}
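
/*
 * The helpers above implement three escalating wakeup levels:
 * RCU_NOCB_WAKE_NOT (no deferred wakeup pending), RCU_NOCB_WAKE
 * (ordinary wakeup), and RCU_NOCB_WAKE_FORCE (wake the leader even
 * if it is not marked as sleeping).  For example, as can be seen in
 * __call_rcu_nocb_enqueue() below, an enqueue with interrupts disabled
 * defers with RCU_NOCB_WAKE, escalating to RCU_NOCB_WAKE_FORCE once
 * the queue grows past qhimark.
 */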

/*
 * Does the specified CPU need an RCU callback for this invocation
 * of rcu_barrier()?
 */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	unsigned long ret;
#ifdef CONFIG_PROVE_RCU
	struct rcu_head *rhp;
#endif /* #ifdef CONFIG_PROVE_RCU */

	/*
	 * Check count of all no-CBs callbacks awaiting invocation.
	 * There needs to be a barrier before this function is called,
	 * but associated with a prior determination that no more
	 * callbacks would be posted.  In the worst case, the first
	 * barrier in rcu_barrier() suffices (but the caller cannot
	 * necessarily rely on this, and it is not a substitute for the
	 * caller getting the concurrency design right!).  There must
	 * also be a barrier between the following load and posting of
	 * a callback (if a callback is in fact needed).  This is
	 * associated with an atomic_inc() in the caller.
	 */
	ret = atomic_long_read(&rdp->nocb_q_count);

#ifdef CONFIG_PROVE_RCU
	rhp = READ_ONCE(rdp->nocb_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_gp_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_follower_head);

	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
	    rcu_scheduler_fully_active) {
		/* RCU callback enqueued before CPU first came online??? */
		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
		       cpu, rhp->func);
		WARN_ON_ONCE(1);
	}
#endif /* #ifdef CONFIG_PROVE_RCU */

	return !!ret;
}

/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy,
				    unsigned long flags)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	WRITE_ONCE(*old_rhpp, rhp);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */

	/* If we are not being polled and there is a kthread, awaken it ... */
	t = READ_ONCE(rdp->nocb_kthread);
	if (rcu_nocb_poll || !t) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	len = atomic_long_read(&rdp->nocb_q_count);
	if (old_rhpp == &rdp->nocb_head) {
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			wake_nocb_leader(rdp, false);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
					       TPS("WakeEmptyIsDeferred"));
		}
		rdp->qlen_last_fqs_check = 0;
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		if (!irqs_disabled_flags(flags)) {
			wake_nocb_leader(rdp, true);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeOvf"));
		} else {
			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
					       TPS("WakeOvfIsDeferred"));
		}
		rdp->qlen_last_fqs_check = LONG_MAX / 2;
	} else {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
	}
	return;
}
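
/*
 * The enqueue itself is lockless: the xchg() on ->nocb_tail claims
 * the old tail pointer and the store through it links in the new
 * callbacks.  For example, enqueuing a single callback rhp onto an
 * empty list (where ->nocb_tail == &rdp->nocb_head) leaves:
 *
 *	rdp->nocb_head == rhp and rdp->nocb_tail == &rhp->next
 *
 * and the old_rhpp == &rdp->nocb_head comparison is what detects
 * this empty-to-non-empty transition for the "WakeEmpty" cases.
 */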

/*
 * This is a helper for __call_rcu(), which invokes this when the normal
 * callback queue is inoperable.  If this is not a no-CBs CPU, this
 * function returns failure back to __call_rcu(), which can complain
 * appropriately.
 *
 * Otherwise, this function queues the callback where the corresponding
 * "rcuo" kthread can find it.
 */
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{

	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;
	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
		trace_rcu_kfree_callback(rcu_state.name, rhp,
					 (unsigned long)rhp->func,
					 -atomic_long_read(&rdp->nocb_q_count_lazy),
					 -atomic_long_read(&rdp->nocb_q_count));
	else
		trace_rcu_callback(rcu_state.name, rhp,
				   -atomic_long_read(&rdp->nocb_q_count_lazy),
				   -atomic_long_read(&rdp->nocb_q_count));

	/*
	 * If called from an extended quiescent state with interrupts
	 * disabled, invoke the RCU core in order to allow the idle-entry
	 * deferred-wakeup check to function.
	 */
	if (irqs_disabled_flags(flags) &&
	    !rcu_is_watching() &&
	    cpu_online(smp_processor_id()))
		invoke_rcu_core();

	return true;
}

/*
 * Adopt orphaned callbacks on a no-CBs CPU, or return false if this
 * is not a no-CBs CPU.
 */
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_is_nocb_cpu(smp_processor_id()))
		return false; /* Not NOCBs CPU, caller must migrate CBs. */
	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
				rcu_segcblist_tail(&rdp->cblist),
				rcu_segcblist_n_cbs(&rdp->cblist),
				rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags);
	rcu_segcblist_init(&rdp->cblist);
	rcu_segcblist_disable(&rdp->cblist);
	return true;
}

/*
 * If necessary, kick off a new grace period, and either way wait
 * for a subsequent grace period to complete.
 */
static void rcu_nocb_wait_gp(struct rcu_data *rdp)
{
	unsigned long c;
	bool d;
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp = rdp->mynode;

	local_irq_save(flags);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		local_irq_restore(flags);
	} else {
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		needwake = rcu_start_this_gp(rnp, rdp, c);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		if (needwake)
			rcu_gp_kthread_wake();
	}

	/*
	 * Wait for the grace period.  Do so interruptibly to avoid messing
	 * up the load average.
	 */
	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
	for (;;) {
		swait_event_interruptible_exclusive(
			rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
			(d = rcu_seq_done(&rnp->gp_seq, c)));
		if (likely(d))
			break;
		WARN_ON(signal_pending(current));
		trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait"));
	}
	trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait"));
	smp_mb(); /* Ensure that CB invocation happens after GP end. */
}
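
/*
 * Note the two nocb_gp_wq[] wait queues per rcu_node, indexed by the
 * low-order bit of the grace-period sequence counter: consecutive
 * grace periods alternate queues, so, for example, kthreads sleeping
 * for a not-yet-completed grace period do not absorb the wakeups that
 * rcu_nocb_gp_cleanup() issues for the grace period just ended.
 */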

/*
 * Leaders come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_leader_wait(struct rcu_data *my_rdp)
{
	bool firsttime = true;
	unsigned long flags;
	bool gotcbs;
	struct rcu_data *rdp;
	struct rcu_head **tail;

wait_again:

	/* Wait for callbacks to appear. */
	if (!rcu_nocb_poll) {
		trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep"));
		swait_event_interruptible_exclusive(my_rdp->nocb_wq,
				!READ_ONCE(my_rdp->nocb_leader_sleep));
		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
		my_rdp->nocb_leader_sleep = true;
		WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&my_rdp->nocb_timer);
		raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
	} else if (firsttime) {
		firsttime = false; /* Don't drown trace log with "Poll"! */
		trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Poll"));
	}

	/*
	 * Each pass through the following loop checks a follower for CBs.
	 * We are our own first follower.  Any CBs found are moved to
	 * nocb_gp_head, where they await a grace period.
	 */
	gotcbs = false;
	smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
		if (!rdp->nocb_gp_head)
			continue; /* No CBs here, try next follower. */

		/* Move callbacks to wait-for-GP list, which is empty. */
		WRITE_ONCE(rdp->nocb_head, NULL);
		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
		gotcbs = true;
	}

	/* No callbacks?  Sleep a bit if polling, and go retry. */
	if (unlikely(!gotcbs)) {
		WARN_ON(signal_pending(current));
		if (rcu_nocb_poll) {
			schedule_timeout_interruptible(1);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu,
					    TPS("WokeEmpty"));
		}
		goto wait_again;
	}

	/* Wait for one grace period. */
	rcu_nocb_wait_gp(my_rdp);

	/* Each pass through the following loop wakes a follower, if needed. */
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		if (!rcu_nocb_poll &&
		    READ_ONCE(rdp->nocb_head) &&
		    READ_ONCE(my_rdp->nocb_leader_sleep)) {
			raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
			my_rdp->nocb_leader_sleep = false; /* No need to sleep. */
			raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
		}
		if (!rdp->nocb_gp_head)
			continue; /* No CBs, so no need to wake follower. */

		/* Append callbacks to follower's "done" list. */
		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
		tail = rdp->nocb_follower_tail;
		rdp->nocb_follower_tail = rdp->nocb_gp_tail;
		*tail = rdp->nocb_gp_head;
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
			/* List was empty, so wake up the follower. */
			swake_up_one(&rdp->nocb_wq);
		}
	}

	/* If we (the leader) don't have CBs, go wait some more. */
	if (!my_rdp->nocb_follower_head)
		goto wait_again;
}

/*
 * Followers come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_follower_wait(struct rcu_data *rdp)
{
	for (;;) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep"));
		swait_event_interruptible_exclusive(rdp->nocb_wq,
					 READ_ONCE(rdp->nocb_follower_head));
		if (smp_load_acquire(&rdp->nocb_follower_head)) {
			/* ^^^ Ensure CB invocation follows _head test. */
			return;
		}
		WARN_ON(signal_pending(current));
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
	}
}
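
/*
 * Callbacks thus flow through a three-stage pipeline in each no-CBs
 * CPU's rcu_data structure:
 *
 *	->nocb_head  -->  ->nocb_gp_head  -->  ->nocb_follower_head
 *	 (enqueued)       (awaiting a GP)       (ready to invoke)
 *
 * with ->nocb_tail, ->nocb_gp_tail, and ->nocb_follower_tail tracking
 * the corresponding tails.  rcu_nocb_kthread() below detaches the
 * follower list and invokes everything on it.
 */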

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
 * callbacks queued by the corresponding no-CBs CPU, however, there is
 * an optional leader-follower relationship so that the grace-period
 * kthreads don't have to do quite so many wakeups.
 */
static int rcu_nocb_kthread(void *arg)
{
	int c, cl;
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_head **tail;
	struct rcu_data *rdp = arg;

	/* Each pass through this loop invokes one batch of callbacks. */
	for (;;) {
		/* Wait for callbacks. */
		if (rdp->nocb_leader == rdp)
			nocb_leader_wait(rdp);
		else
			nocb_follower_wait(rdp);

		/* Pull the ready-to-invoke callbacks onto local list. */
		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
		list = rdp->nocb_follower_head;
		rdp->nocb_follower_head = NULL;
		tail = rdp->nocb_follower_tail;
		rdp->nocb_follower_tail = &rdp->nocb_follower_head;
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		BUG_ON(!list);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));

		/* Each pass through the following loop invokes a callback. */
		trace_rcu_batch_start(rcu_state.name,
				      atomic_long_read(&rdp->nocb_q_count_lazy),
				      atomic_long_read(&rdp->nocb_q_count), -1);
		c = cl = 0;
		while (list) {
			next = list->next;
			/* Wait for enqueuing to complete, if needed. */
			while (next == NULL && &list->next != tail) {
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("WaitQueue"));
				schedule_timeout_interruptible(1);
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("WokeQueue"));
				next = list->next;
			}
			debug_rcu_head_unqueue(list);
			local_bh_disable();
			if (__rcu_reclaim(rcu_state.name, list))
				cl++;
			c++;
			local_bh_enable();
			cond_resched_tasks_rcu_qs();
			list = next;
		}
		trace_rcu_batch_end(rcu_state.name, c, !!list, 0, 0, 1);
		smp_mb__before_atomic(); /* _add after CB invocation. */
		atomic_long_add(-c, &rdp->nocb_q_count);
		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
	}
	return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return READ_ONCE(rdp->nocb_defer_wakeup);
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
{
	unsigned long flags;
	int ndw;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		return;
	}
	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
	__wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
}

/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

	do_nocb_deferred_wakeup_common(rdp);
}
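
/*
 * For example, when __call_rcu_nocb_enqueue() must defer because it
 * was invoked with interrupts disabled, wake_nocb_leader_defer() arms
 * ->nocb_timer for jiffies + 1.  If no fastpath call to
 * do_nocb_deferred_wakeup() (below) beats it to the punch, the handler
 * above fires roughly one tick later and performs the wakeup instead.
 */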
2396 */ 2397 static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2398 { 2399 if (rcu_nocb_need_deferred_wakeup(rdp)) 2400 do_nocb_deferred_wakeup_common(rdp); 2401 } 2402 2403 void __init rcu_init_nohz(void) 2404 { 2405 int cpu; 2406 bool need_rcu_nocb_mask = false; 2407 2408 #if defined(CONFIG_NO_HZ_FULL) 2409 if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask)) 2410 need_rcu_nocb_mask = true; 2411 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2412 2413 if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) { 2414 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { 2415 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); 2416 return; 2417 } 2418 } 2419 if (!cpumask_available(rcu_nocb_mask)) 2420 return; 2421 2422 #if defined(CONFIG_NO_HZ_FULL) 2423 if (tick_nohz_full_running) 2424 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask); 2425 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2426 2427 if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { 2428 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n"); 2429 cpumask_and(rcu_nocb_mask, cpu_possible_mask, 2430 rcu_nocb_mask); 2431 } 2432 if (cpumask_empty(rcu_nocb_mask)) 2433 pr_info("\tOffload RCU callbacks from CPUs: (none).\n"); 2434 else 2435 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n", 2436 cpumask_pr_args(rcu_nocb_mask)); 2437 if (rcu_nocb_poll) 2438 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); 2439 2440 for_each_cpu(cpu, rcu_nocb_mask) 2441 init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu)); 2442 rcu_organize_nocb_kthreads(); 2443 } 2444 2445 /* Initialize per-rcu_data variables for no-CBs CPUs. */ 2446 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2447 { 2448 rdp->nocb_tail = &rdp->nocb_head; 2449 init_swait_queue_head(&rdp->nocb_wq); 2450 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2451 raw_spin_lock_init(&rdp->nocb_lock); 2452 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); 2453 } 2454 2455 /* 2456 * If the specified CPU is a no-CBs CPU that does not already have its 2457 * rcuo kthread, spawn it. If the CPUs are brought online out of order, 2458 * this can require re-organizing the leader-follower relationships. 2459 */ 2460 static void rcu_spawn_one_nocb_kthread(int cpu) 2461 { 2462 struct rcu_data *rdp; 2463 struct rcu_data *rdp_last; 2464 struct rcu_data *rdp_old_leader; 2465 struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu); 2466 struct task_struct *t; 2467 2468 /* 2469 * If this isn't a no-CBs CPU or if it already has an rcuo kthread, 2470 * then nothing to do. 2471 */ 2472 if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread) 2473 return; 2474 2475 /* If we didn't spawn the leader first, reorganize! */ 2476 rdp_old_leader = rdp_spawn->nocb_leader; 2477 if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) { 2478 rdp_last = NULL; 2479 rdp = rdp_old_leader; 2480 do { 2481 rdp->nocb_leader = rdp_spawn; 2482 if (rdp_last && rdp != rdp_spawn) 2483 rdp_last->nocb_next_follower = rdp; 2484 if (rdp == rdp_spawn) { 2485 rdp = rdp->nocb_next_follower; 2486 } else { 2487 rdp_last = rdp; 2488 rdp = rdp->nocb_next_follower; 2489 rdp_last->nocb_next_follower = NULL; 2490 } 2491 } while (rdp); 2492 rdp_spawn->nocb_next_follower = rdp_old_leader; 2493 } 2494 2495 /* Spawn the kthread for this CPU. 

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	rdp->nocb_tail = &rdp->nocb_head;
	init_swait_queue_head(&rdp->nocb_wq);
	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
	raw_spin_lock_init(&rdp->nocb_lock);
	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthread, spawn it.  If the CPUs are brought online out of order,
 * this can require re-organizing the leader-follower relationships.
 */
static void rcu_spawn_one_nocb_kthread(int cpu)
{
	struct rcu_data *rdp;
	struct rcu_data *rdp_last;
	struct rcu_data *rdp_old_leader;
	struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu);
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
		return;

	/* If we didn't spawn the leader first, reorganize! */
	rdp_old_leader = rdp_spawn->nocb_leader;
	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
		rdp_last = NULL;
		rdp = rdp_old_leader;
		do {
			rdp->nocb_leader = rdp_spawn;
			if (rdp_last && rdp != rdp_spawn)
				rdp_last->nocb_next_follower = rdp;
			if (rdp == rdp_spawn) {
				rdp = rdp->nocb_next_follower;
			} else {
				rdp_last = rdp;
				rdp = rdp->nocb_next_follower;
				rdp_last->nocb_next_follower = NULL;
			}
		} while (rdp);
		rdp_spawn->nocb_next_follower = rdp_old_leader;
	}

	/* Spawn the kthread for this CPU. */
	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
			"rcuo%c/%d", rcu_state.abbr, cpu);
	BUG_ON(IS_ERR(t));
	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthreads, spawn them.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
	if (rcu_scheduler_fully_active)
		rcu_spawn_one_nocb_kthread(cpu);
}

/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_all_nocb_kthreads(cpu);
}

/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_leader_stride = -1;
module_param(rcu_nocb_leader_stride, int, 0444);

/*
 * Initialize leader-follower relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
	int cpu;
	int ls = rcu_nocb_leader_stride;
	int nl = 0;  /* Next leader. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
	struct rcu_data *rdp_prev = NULL;

	if (!cpumask_available(rcu_nocb_mask))
		return;
	if (ls == -1) {
		ls = int_sqrt(nr_cpu_ids);
		rcu_nocb_leader_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure.
	 * Should the corresponding CPU come online in the future, then
	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rdp->cpu >= nl) {
			/* New leader, set up for followers & next leader. */
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_leader = rdp;
			rdp_leader = rdp;
		} else {
			/* Another follower, link to previous leader. */
			rdp->nocb_leader = rdp_leader;
			rdp_prev->nocb_next_follower = rdp;
		}
		rdp_prev = rdp;
	}
}
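
/*
 * For example, with nr_cpu_ids == 8 and all eight CPUs in
 * rcu_nocb_mask, the default stride is int_sqrt(8) == 2, yielding
 * four leader-follower groups:
 *
 *	{0, 1}  {2, 3}  {4, 5}  {6, 7}
 *
 * in which each group's lowest-numbered CPU supplies the leader rcuo
 * kthread that waits for grace periods on behalf of its followers.
 */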

/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs. */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;

	/* If there are early-boot callbacks, move them to the nocb lists. */
	if (!rcu_segcblist_empty(&rdp->cblist)) {
		rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
		rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
		atomic_long_set(&rdp->nocb_q_count,
				rcu_segcblist_n_cbs(&rdp->cblist));
		atomic_long_set(&rdp->nocb_q_count_lazy,
				rcu_segcblist_n_lazy_cbs(&rdp->cblist));
		rcu_segcblist_init(&rdp->cblist);
	}
	rcu_segcblist_disable(&rdp->cblist);
	return true;
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
	WARN_ON_ONCE(1); /* Should be dead code. */
	return false;
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	return false;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	return false;
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}

static void rcu_spawn_all_nocb_kthreads(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	return false;
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * CONFIG_RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_FLAG_RCU);
}

/* Record the current task on dyntick-idle entry. */
static void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit. */
static void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}