/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods.  The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none).  If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements.  In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period.  Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves rrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting.  Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS + RCU_EXP_BLKD:
	case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP.  Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP.  Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		rnp->gp_tasks = &t->rcu_node_entry;
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		rnp->exp_tasks = &t->rcu_node_entry;
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->exp_deferred_qs);
}
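
/*
 * As a worked example of the decision table above: a blkd_state of
 * RCU_GP_TASKS + RCU_EXP_BLKD (0x9) means that some earlier task is
 * already blocking the normal grace period and that the newly preempted
 * task is blocking only the expedited grace period, so the new task is
 * queued at the tail of ->blkd_tasks.  A blkd_state of zero means that
 * neither grace period is waiting on this CPU or on any queued task, so
 * the new task is instead queued at the head of the list.
 */
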
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state:  Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU.  It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special.s) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
		rcu_preempt_deferred_qs(t);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/* Bias and limit values for ->rcu_read_lock_nesting. */
#define RCU_NEST_BIAS INT_MAX
#define RCU_NEST_NMAX (-INT_MAX / 2)
#define RCU_NEST_PMAX (INT_MAX / 2)

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = t->rcu_read_lock_nesting;

		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
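
/*
 * A sketch of how ->rcu_read_lock_nesting evolves for a pair of nested
 * readers using the preemptible-RCU implementations above:
 *
 *	rcu_read_lock();	// ->rcu_read_lock_nesting == 1
 *	rcu_read_lock();	// ->rcu_read_lock_nesting == 2
 *	rcu_read_unlock();	// ->rcu_read_lock_nesting == 1
 *	rcu_read_unlock();	// outermost: nesting is briefly set to
 *				// -RCU_NEST_BIAS while any queued
 *				// ->rcu_read_unlock_special work is
 *				// handled, then reset to zero.
 */
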
/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states.  The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->exp_deferred_qs) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.b.deferred_qs = false;
	if (special.b.need_qs) {
		rcu_qs();
		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
			local_irq_restore(flags);
			return;
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU.  Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->exp_deferred_qs) {
		rcu_report_exp_rdp(rdp);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {
		t->rcu_read_unlock_special.b.blocked = false;

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				rnp->boost_tasks = np;
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section?  It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states.  The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled.  This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       t->rcu_read_lock_nesting <= 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section.  The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
static void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;
	bool couldrecurse = t->rcu_read_lock_nesting >= 0;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	if (couldrecurse)
		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
	if (couldrecurse)
		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
}

/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	rdp->defer_qs_iw_pending = false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
	bool irqs_were_disabled;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool exp;
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		t->rcu_read_unlock_special.b.exp_hint = false;
		exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
		      (rdp->grpmask & rnp->expmask) ||
		      tick_nohz_full_cpu(rdp->cpu);
		// Need to defer quiescent state until everything is enabled.
		if ((exp || in_irq()) && irqs_were_disabled && use_softirq &&
		    (in_irq() || !t->rcu_read_unlock_special.b.deferred_qs)) {
			// Using softirq, safe to awaken, and we get
			// no help from enabling irqs, unlike bh/preempt.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else if (exp && irqs_were_disabled && !use_softirq &&
			   !t->rcu_read_unlock_special.b.deferred_qs) {
			// Safe to awaken and we get no help from enabling
			// irqs, unlike bh/preempt.
			invoke_rcu_core();
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting or NO_HZ_FULL, slow is OK.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) &&
			    !rdp->defer_qs_iw_pending && exp) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				init_irq_work(&rdp->defer_qs_iw,
					      rcu_preempt_deferred_qs_handler);
				rdp->defer_qs_iw_pending = true;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		t->rcu_read_unlock_special.b.deferred_qs = true;
		local_irq_restore(flags);
		return;
	}
	WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		rnp->gp_tasks = rnp->blkd_tasks.next;
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU.  When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	if (user || rcu_is_cpu_rrupt_from_idle()) {
		rcu_note_voluntary_context_switch(current);
	}
	if (t->rcu_read_lock_nesting > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!t->rcu_read_lock_nesting) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (t->rcu_read_lock_nesting > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort.  Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		t->rcu_read_lock_nesting = 1;
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(t->rcu_read_lock_nesting)) {
		t->rcu_read_lock_nesting = 1;
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	bool onl;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)rnp->gp_seq, (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= ncheck)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
	}
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPT=n.  Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag.  The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 *
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	rcu_qs();
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

/*
 * Note a PREEMPT=n context switch.  The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	if (!preempt)
		rcu_tasks_qs(current);
out:
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static void rcu_preempt_deferred_qs(struct task_struct *t) { }

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */

		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * If boosting, set rcuc kthreads to realtime priority.
 */
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
#ifdef CONFIG_RCU_BOOST
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      rnp->boost_kthread_status);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
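
/*
 * For example, assuming the default CONFIG_RCU_BOOST_DELAY of 500 ms:
 * with HZ=1000, RCU_BOOST_DELAY_JIFFIES is DIV_ROUND_UP(500 * 1000, 1000),
 * or 500 jiffies; with HZ=250 it is DIV_ROUND_UP(500 * 250, 1000), or 125
 * jiffies.  Either way, rcu_preempt_boost_start_gp() arranges for boosting
 * to be considered only once the grace period is roughly half a second old.
 */
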
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	int rnp_index = rnp - rcu_get_root();
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
		return 0;

	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
		return 0;

	rcu_state.boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}
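
/*
 * For example, if the leaf rcu_node structure spans CPUs 0-15 and CPU 3
 * is the outgoing CPU, rcu_boost_kthread_setaffinity() binds the boost
 * kthread to CPUs 0-2 and 4-15.  If CPU 3 had been the only online CPU
 * in the group, the computed mask would be empty, so the kthread would
 * instead be permitted to run on any CPU.
 */
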
/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp)
		(void)rcu_spawn_one_boost_kthread(rnp);
}

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
 * CPU has RCU callbacks queued.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following two preprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);
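
/*
 * For example, with HZ=1000, RCU_IDLE_GP_DELAY allows a CPU with non-lazy
 * callbacks to sleep for roughly 4 jiffies (4 ms), while a CPU with only
 * lazy callbacks may sleep for RCU_IDLE_LAZY_GP_DELAY, or 6000 jiffies
 * (six seconds), before being awakened to process them.
 */
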
/*
 * Try to advance callbacks on the current CPU, but only if it has been
 * awhile since the last time we did so.  Afterwards, if there are any
 * callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdp->last_advance_all)
		return false;
	rdp->last_advance_all = jiffies;

	rnp = rdp->mynode;

	/*
	 * Don't bother checking unless a grace period has
	 * completed since we last checked and there are
	 * callbacks not yet ready to invoke.
	 */
	if ((rcu_seq_completed_gp(rdp->gp_seq,
				  rcu_seq_current(&rnp->gp_seq)) ||
	     unlikely(READ_ONCE(rdp->gpwrap))) &&
	    rcu_segcblist_pend_cbs(&rdp->cblist))
		note_gp_changes(rdp);

	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		cbs_ready = true;
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	unsigned long dj;

	lockdep_assert_irqs_disabled();

	/* If no callbacks, RCU doesn't need the CPU. */
	if (rcu_segcblist_empty(&rdp->cblist)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
	if (rdp->all_lazy) {
		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	} else {
		dj = round_up(rcu_idle_gp_delay + jiffies,
			      rcu_idle_gp_delay) - jiffies;
	}
	*nextevt = basemono + dj * TICK_NSEC;
	return 0;
}
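
/*
 * For example, with HZ=1000 and only lazy callbacks pending, dj above comes
 * to roughly 6000 jiffies, so the next timer event is requested about six
 * seconds (6000 * TICK_NSEC nanoseconds) after basemono.  With non-lazy
 * callbacks pending, dj is instead between four and seven jiffies,
 * depending on how the rounding to a multiple of rcu_idle_gp_delay lands.
 */
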
/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(void)
{
	bool needwake;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;
	int tne;

	lockdep_assert_irqs_disabled();
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;

	/* Handle nohz enablement switches conservatively. */
	tne = READ_ONCE(tick_nohz_active);
	if (tne != rdp->tick_nohz_enabled_snap) {
		if (!rcu_segcblist_empty(&rdp->cblist))
			invoke_rcu_core(); /* force nohz to see update. */
		rdp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) {
		rdp->all_lazy = false;
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdp->last_accelerate == jiffies)
		return;
	rdp->last_accelerate = jiffies;
	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		needwake = rcu_accelerate_cbs(rnp, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
		if (needwake)
			rcu_gp_kthread_wake();
	}
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(void)
{
	lockdep_assert_irqs_disabled();
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_NOCB_CPU

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into leaders, which manage incoming callbacks, wait for
 * grace periods, and awaken followers, and the followers, which only
 * invoke callbacks.  Each leader is its own follower.  The no-CBs CPUs
 * do a wake_up() on their kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */


/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
 * comma-separated list of CPUs and/or CPU ranges.  If an invalid list is
 * given, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (!strcasecmp(str, "all"))
		cpumask_setall(rcu_nocb_mask);
	else
		if (cpulist_parse(str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);
		}
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);
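
/*
 * For example, booting with "rcu_nocbs=1-7" offloads CPUs 1 through 7,
 * "rcu_nocbs=0,4-7" offloads CPU 0 along with CPUs 4 through 7, and
 * "rcu_nocbs=all" offloads every CPU in the system.
 */
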
static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (cpumask_available(rcu_nocb_mask))
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}

/*
 * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
 * and this function releases it.
 */
static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
			       unsigned long flags)
	__releases(rdp->nocb_lock)
{
	struct rcu_data *rdp_leader = rdp->nocb_leader;

	lockdep_assert_held(&rdp->nocb_lock);
	if (!READ_ONCE(rdp_leader->nocb_kthread)) {
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		return;
	}
	if (rdp_leader->nocb_leader_sleep || force) {
		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
		del_timer(&rdp->nocb_timer);
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
		smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
		swake_up_one(&rdp_leader->nocb_wq);
	} else {
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	}
}

/*
 * Kick the leader kthread for this NOCB group, but caller has not
 * acquired locks.
 */
static void wake_nocb_leader(struct rcu_data *rdp, bool force)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	__wake_nocb_leader(rdp, force, flags);
}

/*
 * Arrange to wake the leader kthread for this NOCB group at some
 * future time when it is safe to do so.
 */
static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
				   const char *reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
		mod_timer(&rdp->nocb_timer, jiffies + 1);
	WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}

/* Does rcu_barrier need to queue an RCU callback on the specified CPU? */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	unsigned long ret;
#ifdef CONFIG_PROVE_RCU
	struct rcu_head *rhp;
#endif /* #ifdef CONFIG_PROVE_RCU */

	/*
	 * Check count of all no-CBs callbacks awaiting invocation.
	 * There needs to be a barrier before this function is called,
	 * but associated with a prior determination that no more
	 * callbacks would be posted.  In the worst case, the first
	 * barrier in rcu_barrier() suffices (but the caller cannot
	 * necessarily rely on this, not a substitute for the caller
	 * getting the concurrency design right!).  There must also be a
	 * barrier between the following load and posting of a callback
	 * (if a callback is in fact needed).  This is associated with an
	 * atomic_inc() in the caller.
	 */
	ret = rcu_get_n_cbs_nocb_cpu(rdp);

#ifdef CONFIG_PROVE_RCU
	rhp = READ_ONCE(rdp->nocb_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_gp_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_follower_head);

	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
	    rcu_scheduler_fully_active) {
*/
1635 pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
1636 cpu, rhp->func);
1637 WARN_ON_ONCE(1);
1638 }
1639 #endif /* #ifdef CONFIG_PROVE_RCU */
1640 
1641 return !!ret;
1642 }
1643 
1644 /*
1645 * Enqueue the specified string of rcu_head structures onto the specified
1646 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
1647 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
1648 * counts are supplied by rhcount and rhcount_lazy.
1649 *
1650 * If warranted, also wake up the kthread servicing this CPU's queues.
1651 */
1652 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
1653 struct rcu_head *rhp,
1654 struct rcu_head **rhtp,
1655 int rhcount, int rhcount_lazy,
1656 unsigned long flags)
1657 {
1658 int len;
1659 struct rcu_head **old_rhpp;
1660 struct task_struct *t;
1661 
1662 /* Enqueue the callback on the nocb list and update counts. */
1663 atomic_long_add(rhcount, &rdp->nocb_q_count);
1664 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
1665 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
1666 WRITE_ONCE(*old_rhpp, rhp);
1667 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
1668 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
1669 
1670 /* If we are not being polled and there is a kthread, awaken it ... */
1671 t = READ_ONCE(rdp->nocb_kthread);
1672 if (rcu_nocb_poll || !t) {
1673 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1674 TPS("WakeNotPoll"));
1675 return;
1676 }
1677 len = rcu_get_n_cbs_nocb_cpu(rdp);
1678 if (old_rhpp == &rdp->nocb_head) {
1679 if (!irqs_disabled_flags(flags)) {
1680 /* ... if queue was empty ... */
1681 wake_nocb_leader(rdp, false);
1682 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1683 TPS("WakeEmpty"));
1684 } else {
1685 wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
1686 TPS("WakeEmptyIsDeferred"));
1687 }
1688 rdp->qlen_last_fqs_check = 0;
1689 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
1690 /* ... or if many callbacks queued. */
1691 if (!irqs_disabled_flags(flags)) {
1692 wake_nocb_leader(rdp, true);
1693 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1694 TPS("WakeOvf"));
1695 } else {
1696 wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
1697 TPS("WakeOvfIsDeferred"));
1698 }
1699 rdp->qlen_last_fqs_check = LONG_MAX / 2;
1700 } else {
1701 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
1702 }
1703 return;
1704 }
1705 
1706 /*
1707 * This is a helper for __call_rcu(), which invokes this when the normal
1708 * callback queue is inoperable. If this is not a no-CBs CPU, this
1709 * function returns failure back to __call_rcu(), which can complain
1710 * appropriately.
1711 *
1712 * Otherwise, this function queues the callback where the corresponding
1713 * "rcuo" kthread can find it.
1714 */ 1715 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 1716 bool lazy, unsigned long flags) 1717 { 1718 1719 if (!rcu_is_nocb_cpu(rdp->cpu)) 1720 return false; 1721 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags); 1722 if (__is_kfree_rcu_offset((unsigned long)rhp->func)) 1723 trace_rcu_kfree_callback(rcu_state.name, rhp, 1724 (unsigned long)rhp->func, 1725 -atomic_long_read(&rdp->nocb_q_count_lazy), 1726 -rcu_get_n_cbs_nocb_cpu(rdp)); 1727 else 1728 trace_rcu_callback(rcu_state.name, rhp, 1729 -atomic_long_read(&rdp->nocb_q_count_lazy), 1730 -rcu_get_n_cbs_nocb_cpu(rdp)); 1731 1732 /* 1733 * If called from an extended quiescent state with interrupts 1734 * disabled, invoke the RCU core in order to allow the idle-entry 1735 * deferred-wakeup check to function. 1736 */ 1737 if (irqs_disabled_flags(flags) && 1738 !rcu_is_watching() && 1739 cpu_online(smp_processor_id())) 1740 invoke_rcu_core(); 1741 1742 return true; 1743 } 1744 1745 /* 1746 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is 1747 * not a no-CBs CPU. 1748 */ 1749 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp, 1750 struct rcu_data *rdp, 1751 unsigned long flags) 1752 { 1753 lockdep_assert_irqs_disabled(); 1754 if (!rcu_is_nocb_cpu(smp_processor_id())) 1755 return false; /* Not NOCBs CPU, caller must migrate CBs. */ 1756 __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist), 1757 rcu_segcblist_tail(&rdp->cblist), 1758 rcu_segcblist_n_cbs(&rdp->cblist), 1759 rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags); 1760 rcu_segcblist_init(&rdp->cblist); 1761 rcu_segcblist_disable(&rdp->cblist); 1762 return true; 1763 } 1764 1765 /* 1766 * If necessary, kick off a new grace period, and either way wait 1767 * for a subsequent grace period to complete. 1768 */ 1769 static void rcu_nocb_wait_gp(struct rcu_data *rdp) 1770 { 1771 unsigned long c; 1772 bool d; 1773 unsigned long flags; 1774 bool needwake; 1775 struct rcu_node *rnp = rdp->mynode; 1776 1777 local_irq_save(flags); 1778 c = rcu_seq_snap(&rcu_state.gp_seq); 1779 if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1780 local_irq_restore(flags); 1781 } else { 1782 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1783 needwake = rcu_start_this_gp(rnp, rdp, c); 1784 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1785 if (needwake) 1786 rcu_gp_kthread_wake(); 1787 } 1788 1789 /* 1790 * Wait for the grace period. Do so interruptibly to avoid messing 1791 * up the load average. 1792 */ 1793 trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait")); 1794 for (;;) { 1795 swait_event_interruptible_exclusive( 1796 rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1], 1797 (d = rcu_seq_done(&rnp->gp_seq, c))); 1798 if (likely(d)) 1799 break; 1800 WARN_ON(signal_pending(current)); 1801 trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait")); 1802 } 1803 trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait")); 1804 smp_mb(); /* Ensure that CB invocation happens after GP end. */ 1805 } 1806 1807 /* 1808 * Leaders come here to wait for additional callbacks to show up. 1809 * This function does not return until callbacks appear. 1810 */ 1811 static void nocb_leader_wait(struct rcu_data *my_rdp) 1812 { 1813 bool firsttime = true; 1814 unsigned long flags; 1815 bool gotcbs; 1816 struct rcu_data *rdp; 1817 struct rcu_head **tail; 1818 1819 wait_again: 1820 1821 /* Wait for callbacks to appear. 
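 * Unless rcu_nocb_poll is set, sleep on ->nocb_wq until __wake_nocb_leader()
 * clears ->nocb_leader_sleep; when polling, just recheck periodically.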
*/ 1822 if (!rcu_nocb_poll) { 1823 trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep")); 1824 swait_event_interruptible_exclusive(my_rdp->nocb_wq, 1825 !READ_ONCE(my_rdp->nocb_leader_sleep)); 1826 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags); 1827 my_rdp->nocb_leader_sleep = true; 1828 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 1829 del_timer(&my_rdp->nocb_timer); 1830 raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags); 1831 } else if (firsttime) { 1832 firsttime = false; /* Don't drown trace log with "Poll"! */ 1833 trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Poll")); 1834 } 1835 1836 /* 1837 * Each pass through the following loop checks a follower for CBs. 1838 * We are our own first follower. Any CBs found are moved to 1839 * nocb_gp_head, where they await a grace period. 1840 */ 1841 gotcbs = false; 1842 smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */ 1843 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { 1844 rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head); 1845 if (!rdp->nocb_gp_head) 1846 continue; /* No CBs here, try next follower. */ 1847 1848 /* Move callbacks to wait-for-GP list, which is empty. */ 1849 WRITE_ONCE(rdp->nocb_head, NULL); 1850 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 1851 gotcbs = true; 1852 } 1853 1854 /* No callbacks? Sleep a bit if polling, and go retry. */ 1855 if (unlikely(!gotcbs)) { 1856 WARN_ON(signal_pending(current)); 1857 if (rcu_nocb_poll) { 1858 schedule_timeout_interruptible(1); 1859 } else { 1860 trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, 1861 TPS("WokeEmpty")); 1862 } 1863 goto wait_again; 1864 } 1865 1866 /* Wait for one grace period. */ 1867 rcu_nocb_wait_gp(my_rdp); 1868 1869 /* Each pass through the following loop wakes a follower, if needed. */ 1870 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { 1871 if (!rcu_nocb_poll && 1872 READ_ONCE(rdp->nocb_head) && 1873 READ_ONCE(my_rdp->nocb_leader_sleep)) { 1874 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags); 1875 my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ 1876 raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags); 1877 } 1878 if (!rdp->nocb_gp_head) 1879 continue; /* No CBs, so no need to wake follower. */ 1880 1881 /* Append callbacks to follower's "done" list. */ 1882 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); 1883 tail = rdp->nocb_follower_tail; 1884 rdp->nocb_follower_tail = rdp->nocb_gp_tail; 1885 *tail = rdp->nocb_gp_head; 1886 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 1887 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { 1888 /* List was empty, so wake up the follower. */ 1889 swake_up_one(&rdp->nocb_wq); 1890 } 1891 } 1892 1893 /* If we (the leader) don't have CBs, go wait some more. */ 1894 if (!my_rdp->nocb_follower_head) 1895 goto wait_again; 1896 } 1897 1898 /* 1899 * Followers come here to wait for additional callbacks to show up. 1900 * This function does not return until callbacks appear. 1901 */ 1902 static void nocb_follower_wait(struct rcu_data *rdp) 1903 { 1904 for (;;) { 1905 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep")); 1906 swait_event_interruptible_exclusive(rdp->nocb_wq, 1907 READ_ONCE(rdp->nocb_follower_head)); 1908 if (smp_load_acquire(&rdp->nocb_follower_head)) { 1909 /* ^^^ Ensure CB invocation follows _head test. 
*/
1910 return;
1911 }
1912 WARN_ON(signal_pending(current));
1913 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
1914 }
1915 }
1916 
1917 /*
1918 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
1919 * callbacks queued by the corresponding no-CBs CPU; however, there is
1920 * an optional leader-follower relationship so that the grace-period
1921 * kthreads don't have to do quite so many wakeups.
1922 */
1923 static int rcu_nocb_kthread(void *arg)
1924 {
1925 int c, cl;
1926 unsigned long flags;
1927 struct rcu_head *list;
1928 struct rcu_head *next;
1929 struct rcu_head **tail;
1930 struct rcu_data *rdp = arg;
1931 
1932 /* Each pass through this loop invokes one batch of callbacks. */
1933 for (;;) {
1934 /* Wait for callbacks. */
1935 if (rdp->nocb_leader == rdp)
1936 nocb_leader_wait(rdp);
1937 else
1938 nocb_follower_wait(rdp);
1939 
1940 /* Pull the ready-to-invoke callbacks onto local list. */
1941 raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1942 list = rdp->nocb_follower_head;
1943 rdp->nocb_follower_head = NULL;
1944 tail = rdp->nocb_follower_tail;
1945 rdp->nocb_follower_tail = &rdp->nocb_follower_head;
1946 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1947 if (WARN_ON_ONCE(!list))
1948 continue;
1949 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
1950 
1951 /* Each pass through the following loop invokes a callback. */
1952 trace_rcu_batch_start(rcu_state.name,
1953 atomic_long_read(&rdp->nocb_q_count_lazy),
1954 rcu_get_n_cbs_nocb_cpu(rdp), -1);
1955 c = cl = 0;
1956 while (list) {
1957 next = list->next;
1958 /* Wait for enqueuing to complete, if needed. */
1959 while (next == NULL && &list->next != tail) {
1960 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1961 TPS("WaitQueue"));
1962 schedule_timeout_interruptible(1);
1963 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1964 TPS("WokeQueue"));
1965 next = list->next;
1966 }
1967 debug_rcu_head_unqueue(list);
1968 local_bh_disable();
1969 if (__rcu_reclaim(rcu_state.name, list))
1970 cl++;
1971 c++;
1972 local_bh_enable();
1973 cond_resched_tasks_rcu_qs();
1974 list = next;
1975 }
1976 trace_rcu_batch_end(rcu_state.name, c, !!list, 0, 0, 1);
1977 smp_mb__before_atomic(); /* _add after CB invocation. */
1978 atomic_long_add(-c, &rdp->nocb_q_count);
1979 atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
1980 }
1981 return 0;
1982 }
1983 
1984 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
1985 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
1986 {
1987 return READ_ONCE(rdp->nocb_defer_wakeup);
1988 }
1989 
1990 /* Do a deferred wakeup of rcu_nocb_kthread(). */
1991 static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
1992 {
1993 unsigned long flags;
1994 int ndw;
1995 
1996 raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1997 if (!rcu_nocb_need_deferred_wakeup(rdp)) {
1998 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1999 return;
2000 }
2001 ndw = READ_ONCE(rdp->nocb_defer_wakeup);
2002 WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2003 __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
2004 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
2005 }
2006 
2007 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
2008 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
2009 {
2010 struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
2011 
2012 do_nocb_deferred_wakeup_common(rdp);
2013 }
2014 
2015 /*
2016 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
2017 * This means we do an inexact common-case check. Note that if 2018 * we miss, ->nocb_timer will eventually clean things up. 2019 */ 2020 static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2021 { 2022 if (rcu_nocb_need_deferred_wakeup(rdp)) 2023 do_nocb_deferred_wakeup_common(rdp); 2024 } 2025 2026 void __init rcu_init_nohz(void) 2027 { 2028 int cpu; 2029 bool need_rcu_nocb_mask = false; 2030 2031 #if defined(CONFIG_NO_HZ_FULL) 2032 if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask)) 2033 need_rcu_nocb_mask = true; 2034 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2035 2036 if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) { 2037 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { 2038 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); 2039 return; 2040 } 2041 } 2042 if (!cpumask_available(rcu_nocb_mask)) 2043 return; 2044 2045 #if defined(CONFIG_NO_HZ_FULL) 2046 if (tick_nohz_full_running) 2047 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask); 2048 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2049 2050 if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { 2051 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n"); 2052 cpumask_and(rcu_nocb_mask, cpu_possible_mask, 2053 rcu_nocb_mask); 2054 } 2055 if (cpumask_empty(rcu_nocb_mask)) 2056 pr_info("\tOffload RCU callbacks from CPUs: (none).\n"); 2057 else 2058 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n", 2059 cpumask_pr_args(rcu_nocb_mask)); 2060 if (rcu_nocb_poll) 2061 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); 2062 2063 for_each_cpu(cpu, rcu_nocb_mask) 2064 init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu)); 2065 rcu_organize_nocb_kthreads(); 2066 } 2067 2068 /* Initialize per-rcu_data variables for no-CBs CPUs. */ 2069 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2070 { 2071 rdp->nocb_tail = &rdp->nocb_head; 2072 init_swait_queue_head(&rdp->nocb_wq); 2073 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2074 raw_spin_lock_init(&rdp->nocb_lock); 2075 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); 2076 } 2077 2078 /* 2079 * If the specified CPU is a no-CBs CPU that does not already have its 2080 * rcuo kthread, spawn it. If the CPUs are brought online out of order, 2081 * this can require re-organizing the leader-follower relationships. 2082 */ 2083 static void rcu_spawn_one_nocb_kthread(int cpu) 2084 { 2085 struct rcu_data *rdp; 2086 struct rcu_data *rdp_last; 2087 struct rcu_data *rdp_old_leader; 2088 struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu); 2089 struct task_struct *t; 2090 2091 /* 2092 * If this isn't a no-CBs CPU or if it already has an rcuo kthread, 2093 * then nothing to do. 2094 */ 2095 if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread) 2096 return; 2097 2098 /* If we didn't spawn the leader first, reorganize! */ 2099 rdp_old_leader = rdp_spawn->nocb_leader; 2100 if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) { 2101 rdp_last = NULL; 2102 rdp = rdp_old_leader; 2103 do { 2104 rdp->nocb_leader = rdp_spawn; 2105 if (rdp_last && rdp != rdp_spawn) 2106 rdp_last->nocb_next_follower = rdp; 2107 if (rdp == rdp_spawn) { 2108 rdp = rdp->nocb_next_follower; 2109 } else { 2110 rdp_last = rdp; 2111 rdp = rdp->nocb_next_follower; 2112 rdp_last->nocb_next_follower = NULL; 2113 } 2114 } while (rdp); 2115 rdp_spawn->nocb_next_follower = rdp_old_leader; 2116 } 2117 2118 /* Spawn the kthread for this CPU. 
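 * The kthread is named "rcuo" followed by the flavor abbreviation and the
 * CPU number, and a failure to spawn it is warned about rather than
 * treated as fatal.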
*/
2119 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2120 "rcuo%c/%d", rcu_state.abbr, cpu);
2121 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
2122 return;
2123 WRITE_ONCE(rdp_spawn->nocb_kthread, t);
2124 }
2125 
2126 /*
2127 * If the specified CPU is a no-CBs CPU that does not already have its
2128 * rcuo kthread, spawn it.
2129 */
2130 static void rcu_spawn_cpu_nocb_kthread(int cpu)
2131 {
2132 if (rcu_scheduler_fully_active)
2133 rcu_spawn_one_nocb_kthread(cpu);
2134 }
2135 
2136 /*
2137 * Once the scheduler is running, spawn rcuo kthreads for all online
2138 * no-CBs CPUs. This assumes that the early_initcall()s happen before
2139 * non-boot CPUs come online -- if this changes, we will need to add
2140 * some mutual exclusion.
2141 */
2142 static void __init rcu_spawn_nocb_kthreads(void)
2143 {
2144 int cpu;
2145 
2146 for_each_online_cpu(cpu)
2147 rcu_spawn_cpu_nocb_kthread(cpu);
2148 }
2149 
2150 /* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
2151 static int rcu_nocb_leader_stride = -1;
2152 module_param(rcu_nocb_leader_stride, int, 0444);
2153 
2154 /*
2155 * Initialize leader-follower relationships for all no-CBs CPUs.
2156 */
2157 static void __init rcu_organize_nocb_kthreads(void)
2158 {
2159 int cpu;
2160 int ls = rcu_nocb_leader_stride;
2161 int nl = 0; /* Next leader. */
2162 struct rcu_data *rdp;
2163 struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */
2164 struct rcu_data *rdp_prev = NULL;
2165 
2166 if (!cpumask_available(rcu_nocb_mask))
2167 return;
2168 if (ls == -1) {
2169 ls = int_sqrt(nr_cpu_ids);
2170 rcu_nocb_leader_stride = ls;
2171 }
2172 
2173 /*
2174 * Each pass through this loop sets up one rcu_data structure.
2175 * Should the corresponding CPU come online in the future, then
2176 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
2177 */
2178 for_each_cpu(cpu, rcu_nocb_mask) {
2179 rdp = per_cpu_ptr(&rcu_data, cpu);
2180 if (rdp->cpu >= nl) {
2181 /* New leader, set up for followers & next leader. */
2182 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2183 rdp->nocb_leader = rdp;
2184 rdp_leader = rdp;
2185 } else {
2186 /* Another follower, link to previous leader. */
2187 rdp->nocb_leader = rdp_leader;
2188 rdp_prev->nocb_next_follower = rdp;
2189 }
2190 rdp_prev = rdp;
2191 }
2192 }
2193 
2194 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs. */
2195 static bool init_nocb_callback_list(struct rcu_data *rdp)
2196 {
2197 if (!rcu_is_nocb_cpu(rdp->cpu))
2198 return false;
2199 
2200 /* If there are early-boot callbacks, move them to nocb lists. */
2201 if (!rcu_segcblist_empty(&rdp->cblist)) {
2202 rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
2203 rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
2204 atomic_long_set(&rdp->nocb_q_count,
2205 rcu_segcblist_n_cbs(&rdp->cblist));
2206 atomic_long_set(&rdp->nocb_q_count_lazy,
2207 rcu_segcblist_n_lazy_cbs(&rdp->cblist));
2208 rcu_segcblist_init(&rdp->cblist);
2209 }
2210 rcu_segcblist_disable(&rdp->cblist);
2211 return true;
2212 }
2213 
2214 /*
2215 * Bind the current task to the offloaded CPUs. If there are no offloaded
2216 * CPUs, leave the task unbound. Splat if the bind attempt fails.
2217 */ 2218 void rcu_bind_current_to_nocb(void) 2219 { 2220 if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask)) 2221 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); 2222 } 2223 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb); 2224 2225 /* 2226 * Return the number of RCU callbacks still queued from the specified 2227 * CPU, which must be a nocbs CPU. 2228 */ 2229 static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp) 2230 { 2231 return atomic_long_read(&rdp->nocb_q_count); 2232 } 2233 2234 #else /* #ifdef CONFIG_RCU_NOCB_CPU */ 2235 2236 static bool rcu_nocb_cpu_needs_barrier(int cpu) 2237 { 2238 WARN_ON_ONCE(1); /* Should be dead code. */ 2239 return false; 2240 } 2241 2242 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) 2243 { 2244 } 2245 2246 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) 2247 { 2248 return NULL; 2249 } 2250 2251 static void rcu_init_one_nocb(struct rcu_node *rnp) 2252 { 2253 } 2254 2255 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 2256 bool lazy, unsigned long flags) 2257 { 2258 return false; 2259 } 2260 2261 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp, 2262 struct rcu_data *rdp, 2263 unsigned long flags) 2264 { 2265 return false; 2266 } 2267 2268 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2269 { 2270 } 2271 2272 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2273 { 2274 return false; 2275 } 2276 2277 static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2278 { 2279 } 2280 2281 static void rcu_spawn_cpu_nocb_kthread(int cpu) 2282 { 2283 } 2284 2285 static void __init rcu_spawn_nocb_kthreads(void) 2286 { 2287 } 2288 2289 static bool init_nocb_callback_list(struct rcu_data *rdp) 2290 { 2291 return false; 2292 } 2293 2294 static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp) 2295 { 2296 return 0; 2297 } 2298 2299 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 2300 2301 /* 2302 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the 2303 * grace-period kthread will do force_quiescent_state() processing? 2304 * The idea is to avoid waking up RCU core processing on such a 2305 * CPU unless the grace period has extended for too long. 2306 * 2307 * This code relies on the fact that all NO_HZ_FULL CPUs are also 2308 * CONFIG_RCU_NOCB_CPU CPUs. 2309 */ 2310 static bool rcu_nohz_full_cpu(void) 2311 { 2312 #ifdef CONFIG_NO_HZ_FULL 2313 if (tick_nohz_full_cpu(smp_processor_id()) && 2314 (!rcu_gp_in_progress() || 2315 ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ))) 2316 return true; 2317 #endif /* #ifdef CONFIG_NO_HZ_FULL */ 2318 return false; 2319 } 2320 2321 /* 2322 * Bind the RCU grace-period kthreads to the housekeeping CPU. 2323 */ 2324 static void rcu_bind_gp_kthread(void) 2325 { 2326 if (!tick_nohz_full_enabled()) 2327 return; 2328 housekeeping_affine(current, HK_FLAG_RCU); 2329 } 2330 2331 /* Record the current task on dyntick-idle entry. */ 2332 static void rcu_dynticks_task_enter(void) 2333 { 2334 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) 2335 WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); 2336 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ 2337 } 2338 2339 /* Record no current task on dyntick-idle exit. 
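 * This pairs with rcu_dynticks_task_enter() above, and is a no-op unless
 * both CONFIG_TASKS_RCU and CONFIG_NO_HZ_FULL are enabled.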
*/ 2340 static void rcu_dynticks_task_exit(void) 2341 { 2342 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) 2343 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); 2344 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ 2345 } 2346
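
/*
 * For example (a minimal sketch, assuming an 8-CPU system whose CPUs 4-7
 * run CPU-bound user-mode work), the boot-time knobs defined above might
 * be combined on the kernel command line as:
 *
 *	rcu_nocbs=4-7 rcu_nocb_poll
 *
 * The first parameter offloads callback invocation for CPUs 4-7 to rcuo
 * kthreads via rcu_nocb_setup(), and the second makes those kthreads poll
 * for callbacks via parse_rcu_nocb_poll() rather than waiting for wakeups,
 * trading some energy efficiency for lower RCU overhead on those CPUs.
 */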