/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
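	/*
	 * Oddness tracked by the RCU code shared with the rest of RCU
	 * (for example, the rcu_expedited and rcu_normal settings) is
	 * reported by the rcupdate_announce_bootup_oddness() call below.
	 */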
rcupdate_announce_bootup_oddness(); 80 } 81 82 #ifdef CONFIG_PREEMPT_RCU 83 84 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake); 85 static void rcu_read_unlock_special(struct task_struct *t); 86 87 /* 88 * Tell them what RCU they are running. 89 */ 90 static void __init rcu_bootup_announce(void) 91 { 92 pr_info("Preemptible hierarchical RCU implementation.\n"); 93 rcu_bootup_announce_oddness(); 94 } 95 96 /* Flags for rcu_preempt_ctxt_queue() decision table. */ 97 #define RCU_GP_TASKS 0x8 98 #define RCU_EXP_TASKS 0x4 99 #define RCU_GP_BLKD 0x2 100 #define RCU_EXP_BLKD 0x1 101 102 /* 103 * Queues a task preempted within an RCU-preempt read-side critical 104 * section into the appropriate location within the ->blkd_tasks list, 105 * depending on the states of any ongoing normal and expedited grace 106 * periods. The ->gp_tasks pointer indicates which element the normal 107 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer 108 * indicates which element the expedited grace period is waiting on (again, 109 * NULL if none). If a grace period is waiting on a given element in the 110 * ->blkd_tasks list, it also waits on all subsequent elements. Thus, 111 * adding a task to the tail of the list blocks any grace period that is 112 * already waiting on one of the elements. In contrast, adding a task 113 * to the head of the list won't block any grace period that is already 114 * waiting on one of the elements. 115 * 116 * This queuing is imprecise, and can sometimes make an ongoing grace 117 * period wait for a task that is not strictly speaking blocking it. 118 * Given the choice, we needlessly block a normal grace period rather than 119 * blocking an expedited grace period. 120 * 121 * Note that an endless sequence of expedited grace periods still cannot 122 * indefinitely postpone a normal grace period. Eventually, all of the 123 * fixed number of preempted tasks blocking the normal grace period that are 124 * not also blocking the expedited grace period will resume and complete 125 * their RCU read-side critical sections. At that point, the ->gp_tasks 126 * pointer will equal the ->exp_tasks pointer, at which point the end of 127 * the corresponding expedited grace period will also be the end of the 128 * normal grace period. 129 */ 130 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) 131 __releases(rnp->lock) /* But leaves rrupts disabled. */ 132 { 133 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + 134 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + 135 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + 136 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); 137 struct task_struct *t = current; 138 139 raw_lockdep_assert_held_rcu_node(rnp); 140 WARN_ON_ONCE(rdp->mynode != rnp); 141 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); 142 /* RCU better not be waiting on newly onlined CPUs! */ 143 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & 144 rdp->grpmask); 145 146 /* 147 * Decide where to queue the newly blocked task. In theory, 148 * this could be an if-statement. In practice, when I tried 149 * that, it was quite messy. 150 */ 151 switch (blkd_state) { 152 case 0: 153 case RCU_EXP_TASKS: 154 case RCU_EXP_TASKS + RCU_GP_BLKD: 155 case RCU_GP_TASKS: 156 case RCU_GP_TASKS + RCU_EXP_TASKS: 157 158 /* 159 * Blocking neither GP, or first task blocking the normal 160 * GP but not blocking the already-waiting expedited GP. 161 * Queue at the head of the list to avoid unnecessarily 162 * blocking the already-waiting GPs. 
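		 * For instance, blkd_state == RCU_GP_TASKS + RCU_EXP_TASKS
		 * (0xc) means that both grace periods already have waiters
		 * on this list but that this task blocks neither of them,
		 * so queuing at the head keeps it ahead of both ->gp_tasks
		 * and ->exp_tasks, and thus out of both GPs' wait sets.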
163 */ 164 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); 165 break; 166 167 case RCU_EXP_BLKD: 168 case RCU_GP_BLKD: 169 case RCU_GP_BLKD + RCU_EXP_BLKD: 170 case RCU_GP_TASKS + RCU_EXP_BLKD: 171 case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD: 172 case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD: 173 174 /* 175 * First task arriving that blocks either GP, or first task 176 * arriving that blocks the expedited GP (with the normal 177 * GP already waiting), or a task arriving that blocks 178 * both GPs with both GPs already waiting. Queue at the 179 * tail of the list to avoid any GP waiting on any of the 180 * already queued tasks that are not blocking it. 181 */ 182 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); 183 break; 184 185 case RCU_EXP_TASKS + RCU_EXP_BLKD: 186 case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD: 187 case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD: 188 189 /* 190 * Second or subsequent task blocking the expedited GP. 191 * The task either does not block the normal GP, or is the 192 * first task blocking the normal GP. Queue just after 193 * the first task blocking the expedited GP. 194 */ 195 list_add(&t->rcu_node_entry, rnp->exp_tasks); 196 break; 197 198 case RCU_GP_TASKS + RCU_GP_BLKD: 199 case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD: 200 201 /* 202 * Second or subsequent task blocking the normal GP. 203 * The task does not block the expedited GP. Queue just 204 * after the first task blocking the normal GP. 205 */ 206 list_add(&t->rcu_node_entry, rnp->gp_tasks); 207 break; 208 209 default: 210 211 /* Yet another exercise in excessive paranoia. */ 212 WARN_ON_ONCE(1); 213 break; 214 } 215 216 /* 217 * We have now queued the task. If it was the first one to 218 * block either grace period, update the ->gp_tasks and/or 219 * ->exp_tasks pointers, respectively, to reference the newly 220 * blocked tasks. 221 */ 222 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { 223 rnp->gp_tasks = &t->rcu_node_entry; 224 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); 225 } 226 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) 227 rnp->exp_tasks = &t->rcu_node_entry; 228 WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) != 229 !(rnp->qsmask & rdp->grpmask)); 230 WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) != 231 !(rnp->expmask & rdp->grpmask)); 232 raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */ 233 234 /* 235 * Report the quiescent state for the expedited GP. This expedited 236 * GP should not be able to end until we report, so there should be 237 * no need to check for a subsequent expedited GP. (Though we are 238 * still in a quiescent state in any case.) 239 */ 240 if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs) 241 rcu_report_exp_rdp(rdp); 242 else 243 WARN_ON_ONCE(rdp->exp_deferred_qs); 244 } 245 246 /* 247 * Record a preemptible-RCU quiescent state for the specified CPU. 248 * Note that this does not necessarily mean that the task currently running 249 * on the CPU is in a quiescent state: Instead, it means that the current 250 * grace period need not wait on any RCU read-side critical section that 251 * starts later on this CPU. It also means that if the current task is 252 * in an RCU read-side critical section, it has already added itself to 253 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the 254 * current task, there might be any number of other tasks blocked while 255 * in an RCU read-side critical section. 256 * 257 * Callers to this function must disable preemption. 
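 * For example, rcu_note_context_switch() below invokes this function
 * with interrupts (and thus preemption) disabled.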
258 */ 259 static void rcu_qs(void) 260 { 261 RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n"); 262 if (__this_cpu_read(rcu_data.cpu_no_qs.s)) { 263 trace_rcu_grace_period(TPS("rcu_preempt"), 264 __this_cpu_read(rcu_data.gp_seq), 265 TPS("cpuqs")); 266 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); 267 barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */ 268 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); 269 } 270 } 271 272 /* 273 * We have entered the scheduler, and the current task might soon be 274 * context-switched away from. If this task is in an RCU read-side 275 * critical section, we will no longer be able to rely on the CPU to 276 * record that fact, so we enqueue the task on the blkd_tasks list. 277 * The task will dequeue itself when it exits the outermost enclosing 278 * RCU read-side critical section. Therefore, the current grace period 279 * cannot be permitted to complete until the blkd_tasks list entries 280 * predating the current grace period drain, in other words, until 281 * rnp->gp_tasks becomes NULL. 282 * 283 * Caller must disable interrupts. 284 */ 285 void rcu_note_context_switch(bool preempt) 286 { 287 struct task_struct *t = current; 288 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 289 struct rcu_node *rnp; 290 291 trace_rcu_utilization(TPS("Start context switch")); 292 lockdep_assert_irqs_disabled(); 293 WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); 294 if (t->rcu_read_lock_nesting > 0 && 295 !t->rcu_read_unlock_special.b.blocked) { 296 297 /* Possibly blocking in an RCU read-side critical section. */ 298 rnp = rdp->mynode; 299 raw_spin_lock_rcu_node(rnp); 300 t->rcu_read_unlock_special.b.blocked = true; 301 t->rcu_blocked_node = rnp; 302 303 /* 304 * Verify the CPU's sanity, trace the preemption, and 305 * then queue the task as required based on the states 306 * of any ongoing and expedited grace periods. 307 */ 308 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); 309 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); 310 trace_rcu_preempt_task(rcu_state.name, 311 t->pid, 312 (rnp->qsmask & rdp->grpmask) 313 ? rnp->gp_seq 314 : rcu_seq_snap(&rnp->gp_seq)); 315 rcu_preempt_ctxt_queue(rnp, rdp); 316 } else { 317 rcu_preempt_deferred_qs(t); 318 } 319 320 /* 321 * Either we were not in an RCU read-side critical section to 322 * begin with, or we have now recorded that critical section 323 * globally. Either way, we can now note a quiescent state 324 * for this CPU. Again, if we were in an RCU read-side critical 325 * section, and if that critical section was blocking the current 326 * grace period, then the fact that the task has been enqueued 327 * means that we continue to block the current grace period. 328 */ 329 rcu_qs(); 330 if (rdp->exp_deferred_qs) 331 rcu_report_exp_rdp(rdp); 332 trace_rcu_utilization(TPS("End context switch")); 333 } 334 EXPORT_SYMBOL_GPL(rcu_note_context_switch); 335 336 /* 337 * Check for preempted RCU readers blocking the current grace period 338 * for the specified rcu_node structure. If the caller needs a reliable 339 * answer, it must hold the rcu_node's ->lock. 340 */ 341 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) 342 { 343 return rnp->gp_tasks != NULL; 344 } 345 346 /* Bias and limit values for ->rcu_read_lock_nesting. */ 347 #define RCU_NEST_BIAS INT_MAX 348 #define RCU_NEST_NMAX (-INT_MAX / 2) 349 #define RCU_NEST_PMAX (INT_MAX / 2) 350 351 /* 352 * Preemptible RCU implementation for rcu_read_lock(). 
353 * Just increment ->rcu_read_lock_nesting, shared state will be updated 354 * if we block. 355 */ 356 void __rcu_read_lock(void) 357 { 358 current->rcu_read_lock_nesting++; 359 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) 360 WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX); 361 barrier(); /* critical section after entry code. */ 362 } 363 EXPORT_SYMBOL_GPL(__rcu_read_lock); 364 365 /* 366 * Preemptible RCU implementation for rcu_read_unlock(). 367 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost 368 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then 369 * invoke rcu_read_unlock_special() to clean up after a context switch 370 * in an RCU read-side critical section and other special cases. 371 */ 372 void __rcu_read_unlock(void) 373 { 374 struct task_struct *t = current; 375 376 if (t->rcu_read_lock_nesting != 1) { 377 --t->rcu_read_lock_nesting; 378 } else { 379 barrier(); /* critical section before exit code. */ 380 t->rcu_read_lock_nesting = -RCU_NEST_BIAS; 381 barrier(); /* assign before ->rcu_read_unlock_special load */ 382 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) 383 rcu_read_unlock_special(t); 384 barrier(); /* ->rcu_read_unlock_special load before assign */ 385 t->rcu_read_lock_nesting = 0; 386 } 387 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { 388 int rrln = t->rcu_read_lock_nesting; 389 390 WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX); 391 } 392 } 393 EXPORT_SYMBOL_GPL(__rcu_read_unlock); 394 395 /* 396 * Advance a ->blkd_tasks-list pointer to the next entry, instead 397 * returning NULL if at the end of the list. 398 */ 399 static struct list_head *rcu_next_node_entry(struct task_struct *t, 400 struct rcu_node *rnp) 401 { 402 struct list_head *np; 403 404 np = t->rcu_node_entry.next; 405 if (np == &rnp->blkd_tasks) 406 np = NULL; 407 return np; 408 } 409 410 /* 411 * Return true if the specified rcu_node structure has tasks that were 412 * preempted within an RCU read-side critical section. 413 */ 414 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) 415 { 416 return !list_empty(&rnp->blkd_tasks); 417 } 418 419 /* 420 * Report deferred quiescent states. The deferral time can 421 * be quite short, for example, in the case of the call from 422 * rcu_read_unlock_special(). 423 */ 424 static void 425 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) 426 { 427 bool empty_exp; 428 bool empty_norm; 429 bool empty_exp_now; 430 struct list_head *np; 431 bool drop_boost_mutex = false; 432 struct rcu_data *rdp; 433 struct rcu_node *rnp; 434 union rcu_special special; 435 436 /* 437 * If RCU core is waiting for this CPU to exit its critical section, 438 * report the fact that it has exited. Because irqs are disabled, 439 * t->rcu_read_unlock_special cannot change. 440 */ 441 special = t->rcu_read_unlock_special; 442 rdp = this_cpu_ptr(&rcu_data); 443 if (!special.s && !rdp->exp_deferred_qs) { 444 local_irq_restore(flags); 445 return; 446 } 447 t->rcu_read_unlock_special.b.deferred_qs = false; 448 if (special.b.need_qs) { 449 rcu_qs(); 450 t->rcu_read_unlock_special.b.need_qs = false; 451 if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) { 452 local_irq_restore(flags); 453 return; 454 } 455 } 456 457 /* 458 * Respond to a request by an expedited grace period for a 459 * quiescent state from this CPU. Note that requests from 460 * tasks are handled when removing the task from the 461 * blocked-tasks list below. 
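 * (rcu_report_exp_rdp() also clears ->exp_deferred_qs when making
 * the report.)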
462 */ 463 if (rdp->exp_deferred_qs) { 464 rcu_report_exp_rdp(rdp); 465 if (!t->rcu_read_unlock_special.s) { 466 local_irq_restore(flags); 467 return; 468 } 469 } 470 471 /* Clean up if blocked during RCU read-side critical section. */ 472 if (special.b.blocked) { 473 t->rcu_read_unlock_special.b.blocked = false; 474 475 /* 476 * Remove this task from the list it blocked on. The task 477 * now remains queued on the rcu_node corresponding to the 478 * CPU it first blocked on, so there is no longer any need 479 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia. 480 */ 481 rnp = t->rcu_blocked_node; 482 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 483 WARN_ON_ONCE(rnp != t->rcu_blocked_node); 484 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); 485 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); 486 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && 487 (!empty_norm || rnp->qsmask)); 488 empty_exp = sync_rcu_preempt_exp_done(rnp); 489 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 490 np = rcu_next_node_entry(t, rnp); 491 list_del_init(&t->rcu_node_entry); 492 t->rcu_blocked_node = NULL; 493 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"), 494 rnp->gp_seq, t->pid); 495 if (&t->rcu_node_entry == rnp->gp_tasks) 496 rnp->gp_tasks = np; 497 if (&t->rcu_node_entry == rnp->exp_tasks) 498 rnp->exp_tasks = np; 499 if (IS_ENABLED(CONFIG_RCU_BOOST)) { 500 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */ 501 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; 502 if (&t->rcu_node_entry == rnp->boost_tasks) 503 rnp->boost_tasks = np; 504 } 505 506 /* 507 * If this was the last task on the current list, and if 508 * we aren't waiting on any CPUs, report the quiescent state. 509 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, 510 * so we must take a snapshot of the expedited state. 511 */ 512 empty_exp_now = sync_rcu_preempt_exp_done(rnp); 513 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { 514 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 515 rnp->gp_seq, 516 0, rnp->qsmask, 517 rnp->level, 518 rnp->grplo, 519 rnp->grphi, 520 !!rnp->gp_tasks); 521 rcu_report_unblock_qs_rnp(rnp, flags); 522 } else { 523 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 524 } 525 526 /* Unboost if we were boosted. */ 527 if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) 528 rt_mutex_futex_unlock(&rnp->boost_mtx); 529 530 /* 531 * If this was the last task on the expedited lists, 532 * then we need to report up the rcu_node hierarchy. 533 */ 534 if (!empty_exp && empty_exp_now) 535 rcu_report_exp_rnp(rnp, true); 536 } else { 537 local_irq_restore(flags); 538 } 539 } 540 541 /* 542 * Is a deferred quiescent-state pending, and are we also not in 543 * an RCU read-side critical section? It is the caller's responsibility 544 * to ensure it is otherwise safe to report any deferred quiescent 545 * states. The reason for this is that it is safe to report a 546 * quiescent state during context switch even though preemption 547 * is disabled. This function cannot be expected to understand these 548 * nuances, so the caller must handle them. 549 */ 550 static bool rcu_preempt_need_deferred_qs(struct task_struct *t) 551 { 552 return (__this_cpu_read(rcu_data.exp_deferred_qs) || 553 READ_ONCE(t->rcu_read_unlock_special.s)) && 554 t->rcu_read_lock_nesting <= 0; 555 } 556 557 /* 558 * Report a deferred quiescent state if needed and safe to do so. 559 * As with rcu_preempt_need_deferred_qs(), "safe" involves only 560 * not being in an RCU read-side critical section. 
The caller must 561 * evaluate safety in terms of interrupt, softirq, and preemption 562 * disabling. 563 */ 564 static void rcu_preempt_deferred_qs(struct task_struct *t) 565 { 566 unsigned long flags; 567 bool couldrecurse = t->rcu_read_lock_nesting >= 0; 568 569 if (!rcu_preempt_need_deferred_qs(t)) 570 return; 571 if (couldrecurse) 572 t->rcu_read_lock_nesting -= RCU_NEST_BIAS; 573 local_irq_save(flags); 574 rcu_preempt_deferred_qs_irqrestore(t, flags); 575 if (couldrecurse) 576 t->rcu_read_lock_nesting += RCU_NEST_BIAS; 577 } 578 579 /* 580 * Minimal handler to give the scheduler a chance to re-evaluate. 581 */ 582 static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp) 583 { 584 struct rcu_data *rdp; 585 586 rdp = container_of(iwp, struct rcu_data, defer_qs_iw); 587 rdp->defer_qs_iw_pending = false; 588 } 589 590 /* 591 * Handle special cases during rcu_read_unlock(), such as needing to 592 * notify RCU core processing or task having blocked during the RCU 593 * read-side critical section. 594 */ 595 static void rcu_read_unlock_special(struct task_struct *t) 596 { 597 unsigned long flags; 598 bool preempt_bh_were_disabled = 599 !!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)); 600 bool irqs_were_disabled; 601 602 /* NMI handlers cannot block and cannot safely manipulate state. */ 603 if (in_nmi()) 604 return; 605 606 local_irq_save(flags); 607 irqs_were_disabled = irqs_disabled_flags(flags); 608 if (preempt_bh_were_disabled || irqs_were_disabled) { 609 bool exp; 610 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 611 struct rcu_node *rnp = rdp->mynode; 612 613 t->rcu_read_unlock_special.b.exp_hint = false; 614 exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) || 615 (rdp->grpmask & rnp->expmask) || 616 tick_nohz_full_cpu(rdp->cpu); 617 // Need to defer quiescent state until everything is enabled. 618 if (irqs_were_disabled && use_softirq && 619 (in_interrupt() || 620 (exp && !t->rcu_read_unlock_special.b.deferred_qs))) { 621 // Using softirq, safe to awaken, and we get 622 // no help from enabling irqs, unlike bh/preempt. 623 raise_softirq_irqoff(RCU_SOFTIRQ); 624 } else { 625 // Enabling BH or preempt does reschedule, so... 626 // Also if no expediting or NO_HZ_FULL, slow is OK. 627 set_tsk_need_resched(current); 628 set_preempt_need_resched(); 629 if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled && 630 !rdp->defer_qs_iw_pending && exp) { 631 // Get scheduler to re-evaluate and call hooks. 632 // If !IRQ_WORK, FQS scan will eventually IPI. 633 init_irq_work(&rdp->defer_qs_iw, 634 rcu_preempt_deferred_qs_handler); 635 rdp->defer_qs_iw_pending = true; 636 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); 637 } 638 } 639 t->rcu_read_unlock_special.b.deferred_qs = true; 640 local_irq_restore(flags); 641 return; 642 } 643 WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false); 644 rcu_preempt_deferred_qs_irqrestore(t, flags); 645 } 646 647 /* 648 * Check that the list of blocked tasks for the newly completed grace 649 * period is in fact empty. It is a serious bug to complete a grace 650 * period that still has RCU readers blocked! This function must be 651 * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock 652 * must be held by the caller. 653 * 654 * Also, if there are blocked tasks on the list, they automatically 655 * block the newly created grace period, so set up ->gp_tasks accordingly. 
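 * This function is invoked for each rcu_node structure as part of
 * grace-period initialization.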
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		rnp->gp_tasks = rnp->blkd_tasks.next;
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU.  When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	if (user || rcu_is_cpu_rrupt_from_idle()) {
		rcu_note_voluntary_context_switch(current);
	}
	if (t->rcu_read_lock_nesting > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!t->rcu_read_lock_nesting) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (t->rcu_read_lock_nesting > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort.  Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		t->rcu_read_lock_nesting = 1;
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(t->rcu_read_lock_nesting)) {
		t->rcu_read_lock_nesting = 1;
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
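 * For example, rcu_preempt_check_blocked_tasks() above passes
 * ncheck == 10 when it detects leftover blocked readers.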
741 */ 742 static void 743 dump_blkd_tasks(struct rcu_node *rnp, int ncheck) 744 { 745 int cpu; 746 int i; 747 struct list_head *lhp; 748 bool onl; 749 struct rcu_data *rdp; 750 struct rcu_node *rnp1; 751 752 raw_lockdep_assert_held_rcu_node(rnp); 753 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 754 __func__, rnp->grplo, rnp->grphi, rnp->level, 755 (long)rnp->gp_seq, (long)rnp->completedqs); 756 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 757 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n", 758 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext); 759 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n", 760 __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks); 761 pr_info("%s: ->blkd_tasks", __func__); 762 i = 0; 763 list_for_each(lhp, &rnp->blkd_tasks) { 764 pr_cont(" %p", lhp); 765 if (++i >= ncheck) 766 break; 767 } 768 pr_cont("\n"); 769 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { 770 rdp = per_cpu_ptr(&rcu_data, cpu); 771 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); 772 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n", 773 cpu, ".o"[onl], 774 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, 775 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); 776 } 777 } 778 779 #else /* #ifdef CONFIG_PREEMPT_RCU */ 780 781 /* 782 * Tell them what RCU they are running. 783 */ 784 static void __init rcu_bootup_announce(void) 785 { 786 pr_info("Hierarchical RCU implementation.\n"); 787 rcu_bootup_announce_oddness(); 788 } 789 790 /* 791 * Note a quiescent state for PREEMPT=n. Because we do not need to know 792 * how many quiescent states passed, just if there was at least one since 793 * the start of the grace period, this just sets a flag. The caller must 794 * have disabled preemption. 795 */ 796 static void rcu_qs(void) 797 { 798 RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!"); 799 if (!__this_cpu_read(rcu_data.cpu_no_qs.s)) 800 return; 801 trace_rcu_grace_period(TPS("rcu_sched"), 802 __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); 803 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); 804 if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) 805 return; 806 __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false); 807 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); 808 } 809 810 /* 811 * Register an urgently needed quiescent state. If there is an 812 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight 813 * dyntick-idle quiescent state visible to other CPUs, which will in 814 * some cases serve for expedited as well as normal grace periods. 815 * Either way, register a lightweight quiescent state. 816 */ 817 void rcu_all_qs(void) 818 { 819 unsigned long flags; 820 821 if (!raw_cpu_read(rcu_data.rcu_urgent_qs)) 822 return; 823 preempt_disable(); 824 /* Load rcu_urgent_qs before other flags. */ 825 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 826 preempt_enable(); 827 return; 828 } 829 this_cpu_write(rcu_data.rcu_urgent_qs, false); 830 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) { 831 local_irq_save(flags); 832 rcu_momentary_dyntick_idle(); 833 local_irq_restore(flags); 834 } 835 rcu_qs(); 836 preempt_enable(); 837 } 838 EXPORT_SYMBOL_GPL(rcu_all_qs); 839 840 /* 841 * Note a PREEMPT=n context switch. The caller must have disabled interrupts. 
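 * Because this kernel build is non-preemptible, a context switch can
 * occur only outside of RCU read-side critical sections, so the switch
 * itself is a quiescent state, hence the unconditional rcu_qs() below.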
842 */ 843 void rcu_note_context_switch(bool preempt) 844 { 845 trace_rcu_utilization(TPS("Start context switch")); 846 rcu_qs(); 847 /* Load rcu_urgent_qs before other flags. */ 848 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) 849 goto out; 850 this_cpu_write(rcu_data.rcu_urgent_qs, false); 851 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) 852 rcu_momentary_dyntick_idle(); 853 if (!preempt) 854 rcu_tasks_qs(current); 855 out: 856 trace_rcu_utilization(TPS("End context switch")); 857 } 858 EXPORT_SYMBOL_GPL(rcu_note_context_switch); 859 860 /* 861 * Because preemptible RCU does not exist, there are never any preempted 862 * RCU readers. 863 */ 864 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) 865 { 866 return 0; 867 } 868 869 /* 870 * Because there is no preemptible RCU, there can be no readers blocked. 871 */ 872 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) 873 { 874 return false; 875 } 876 877 /* 878 * Because there is no preemptible RCU, there can be no deferred quiescent 879 * states. 880 */ 881 static bool rcu_preempt_need_deferred_qs(struct task_struct *t) 882 { 883 return false; 884 } 885 static void rcu_preempt_deferred_qs(struct task_struct *t) { } 886 887 /* 888 * Because there is no preemptible RCU, there can be no readers blocked, 889 * so there is no need to check for blocked tasks. So check only for 890 * bogus qsmask values. 891 */ 892 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 893 { 894 WARN_ON_ONCE(rnp->qsmask); 895 } 896 897 /* 898 * Check to see if this CPU is in a non-context-switch quiescent state, 899 * namely user mode and idle loop. 900 */ 901 static void rcu_flavor_sched_clock_irq(int user) 902 { 903 if (user || rcu_is_cpu_rrupt_from_idle()) { 904 905 /* 906 * Get here if this CPU took its interrupt from user 907 * mode or from the idle loop, and if this is not a 908 * nested interrupt. In this case, the CPU is in 909 * a quiescent state, so note it. 910 * 911 * No memory barrier is required here because rcu_qs() 912 * references only CPU-local variables that other CPUs 913 * neither access nor modify, at least not while the 914 * corresponding CPU is online. 915 */ 916 917 rcu_qs(); 918 } 919 } 920 921 /* 922 * Because preemptible RCU does not exist, tasks cannot possibly exit 923 * while in preemptible RCU read-side critical sections. 924 */ 925 void exit_rcu(void) 926 { 927 } 928 929 /* 930 * Dump the guaranteed-empty blocked-tasks state. Trust but verify. 931 */ 932 static void 933 dump_blkd_tasks(struct rcu_node *rnp, int ncheck) 934 { 935 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); 936 } 937 938 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 939 940 /* 941 * If boosting, set rcuc kthreads to realtime priority. 942 */ 943 static void rcu_cpu_kthread_setup(unsigned int cpu) 944 { 945 #ifdef CONFIG_RCU_BOOST 946 struct sched_param sp; 947 948 sp.sched_priority = kthread_prio; 949 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); 950 #endif /* #ifdef CONFIG_RCU_BOOST */ 951 } 952 953 #ifdef CONFIG_RCU_BOOST 954 955 /* 956 * Carry out RCU priority boosting on the task indicated by ->exp_tasks 957 * or ->boost_tasks, advancing the pointer to the next task in the 958 * ->blkd_tasks list. 959 * 960 * Note that irqs must be enabled: boosting the task can block. 961 * Returns 1 if there are more tasks needing to be boosted. 
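 * The return value feeds the spincnt heuristic in rcu_boost_kthread(),
 * which briefly sleeps once more than ten consecutive calls have all
 * reported further boosting work.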
962 */ 963 static int rcu_boost(struct rcu_node *rnp) 964 { 965 unsigned long flags; 966 struct task_struct *t; 967 struct list_head *tb; 968 969 if (READ_ONCE(rnp->exp_tasks) == NULL && 970 READ_ONCE(rnp->boost_tasks) == NULL) 971 return 0; /* Nothing left to boost. */ 972 973 raw_spin_lock_irqsave_rcu_node(rnp, flags); 974 975 /* 976 * Recheck under the lock: all tasks in need of boosting 977 * might exit their RCU read-side critical sections on their own. 978 */ 979 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { 980 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 981 return 0; 982 } 983 984 /* 985 * Preferentially boost tasks blocking expedited grace periods. 986 * This cannot starve the normal grace periods because a second 987 * expedited grace period must boost all blocked tasks, including 988 * those blocking the pre-existing normal grace period. 989 */ 990 if (rnp->exp_tasks != NULL) 991 tb = rnp->exp_tasks; 992 else 993 tb = rnp->boost_tasks; 994 995 /* 996 * We boost task t by manufacturing an rt_mutex that appears to 997 * be held by task t. We leave a pointer to that rt_mutex where 998 * task t can find it, and task t will release the mutex when it 999 * exits its outermost RCU read-side critical section. Then 1000 * simply acquiring this artificial rt_mutex will boost task 1001 * t's priority. (Thanks to tglx for suggesting this approach!) 1002 * 1003 * Note that task t must acquire rnp->lock to remove itself from 1004 * the ->blkd_tasks list, which it will do from exit() if from 1005 * nowhere else. We therefore are guaranteed that task t will 1006 * stay around at least until we drop rnp->lock. Note that 1007 * rnp->lock also resolves races between our priority boosting 1008 * and task t's exiting its outermost RCU read-side critical 1009 * section. 1010 */ 1011 t = container_of(tb, struct task_struct, rcu_node_entry); 1012 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); 1013 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1014 /* Lock only for side effect: boosts task t's priority. */ 1015 rt_mutex_lock(&rnp->boost_mtx); 1016 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ 1017 1018 return READ_ONCE(rnp->exp_tasks) != NULL || 1019 READ_ONCE(rnp->boost_tasks) != NULL; 1020 } 1021 1022 /* 1023 * Priority-boosting kthread, one per leaf rcu_node. 1024 */ 1025 static int rcu_boost_kthread(void *arg) 1026 { 1027 struct rcu_node *rnp = (struct rcu_node *)arg; 1028 int spincnt = 0; 1029 int more2boost; 1030 1031 trace_rcu_utilization(TPS("Start boost kthread@init")); 1032 for (;;) { 1033 rnp->boost_kthread_status = RCU_KTHREAD_WAITING; 1034 trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); 1035 rcu_wait(rnp->boost_tasks || rnp->exp_tasks); 1036 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); 1037 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; 1038 more2boost = rcu_boost(rnp); 1039 if (more2boost) 1040 spincnt++; 1041 else 1042 spincnt = 0; 1043 if (spincnt > 10) { 1044 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; 1045 trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); 1046 schedule_timeout_interruptible(2); 1047 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); 1048 spincnt = 0; 1049 } 1050 } 1051 /* NOTREACHED */ 1052 trace_rcu_utilization(TPS("End boost kthread@notreached")); 1053 return 0; 1054 } 1055 1056 /* 1057 * Check to see if it is time to start boosting RCU readers that are 1058 * blocking the current grace period, and, if so, tell the per-rcu_node 1059 * kthread to start boosting them. 
If there is an expedited grace 1060 * period in progress, it is always time to boost. 1061 * 1062 * The caller must hold rnp->lock, which this function releases. 1063 * The ->boost_kthread_task is immortal, so we don't need to worry 1064 * about it going away. 1065 */ 1066 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1067 __releases(rnp->lock) 1068 { 1069 raw_lockdep_assert_held_rcu_node(rnp); 1070 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { 1071 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1072 return; 1073 } 1074 if (rnp->exp_tasks != NULL || 1075 (rnp->gp_tasks != NULL && 1076 rnp->boost_tasks == NULL && 1077 rnp->qsmask == 0 && 1078 ULONG_CMP_GE(jiffies, rnp->boost_time))) { 1079 if (rnp->exp_tasks == NULL) 1080 rnp->boost_tasks = rnp->gp_tasks; 1081 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1082 rcu_wake_cond(rnp->boost_kthread_task, 1083 rnp->boost_kthread_status); 1084 } else { 1085 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1086 } 1087 } 1088 1089 /* 1090 * Is the current CPU running the RCU-callbacks kthread? 1091 * Caller must have preemption disabled. 1092 */ 1093 static bool rcu_is_callbacks_kthread(void) 1094 { 1095 return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; 1096 } 1097 1098 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) 1099 1100 /* 1101 * Do priority-boost accounting for the start of a new grace period. 1102 */ 1103 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1104 { 1105 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; 1106 } 1107 1108 /* 1109 * Create an RCU-boost kthread for the specified node if one does not 1110 * already exist. We only create this kthread for preemptible RCU. 1111 * Returns zero if all is well, a negated errno otherwise. 1112 */ 1113 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) 1114 { 1115 int rnp_index = rnp - rcu_get_root(); 1116 unsigned long flags; 1117 struct sched_param sp; 1118 struct task_struct *t; 1119 1120 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 1121 return; 1122 1123 if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) 1124 return; 1125 1126 rcu_state.boost = 1; 1127 1128 if (rnp->boost_kthread_task != NULL) 1129 return; 1130 1131 t = kthread_create(rcu_boost_kthread, (void *)rnp, 1132 "rcub/%d", rnp_index); 1133 if (WARN_ON_ONCE(IS_ERR(t))) 1134 return; 1135 1136 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1137 rnp->boost_kthread_task = t; 1138 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1139 sp.sched_priority = kthread_prio; 1140 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 1141 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ 1142 } 1143 1144 /* 1145 * Set the per-rcu_node kthread's affinity to cover all CPUs that are 1146 * served by the rcu_node in question. The CPU hotplug lock is still 1147 * held, so the value of rnp->qsmaskinit will be stable. 1148 * 1149 * We don't include outgoingcpu in the affinity set, use -1 if there is 1150 * no outgoing CPU. If there are no CPUs left in the affinity set, 1151 * this function allows the kthread to execute on any CPU. 
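 * For example, if a leaf rcu_node structure spans CPUs 0-15 and CPU 3
 * is the outgoing CPU, the kthread is restricted to whichever of CPUs
 * 0-2 and 4-15 are currently online.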
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp)
		rcu_spawn_one_boost_kthread(rnp);
}

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		rcu_spawn_one_boost_kthread(rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so.  This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
 * CPU has RCU callbacks queued.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
	       !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following two preprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.
This 1272 * is sized to be roughly one RCU grace period. Those energy-efficiency 1273 * benchmarkers who might otherwise be tempted to set this to a large 1274 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your 1275 * system. And if you are -that- concerned about energy efficiency, 1276 * just power the system down and be done with it! 1277 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is 1278 * permitted to sleep in dyntick-idle mode with only lazy RCU 1279 * callbacks pending. Setting this too high can OOM your system. 1280 * 1281 * The values below work well in practice. If future workloads require 1282 * adjustment, they can be converted into kernel config parameters, though 1283 * making the state machine smarter might be a better option. 1284 */ 1285 #define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */ 1286 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ 1287 1288 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY; 1289 module_param(rcu_idle_gp_delay, int, 0644); 1290 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; 1291 module_param(rcu_idle_lazy_gp_delay, int, 0644); 1292 1293 /* 1294 * Try to advance callbacks on the current CPU, but only if it has been 1295 * awhile since the last time we did so. Afterwards, if there are any 1296 * callbacks ready for immediate invocation, return true. 1297 */ 1298 static bool __maybe_unused rcu_try_advance_all_cbs(void) 1299 { 1300 bool cbs_ready = false; 1301 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1302 struct rcu_node *rnp; 1303 1304 /* Exit early if we advanced recently. */ 1305 if (jiffies == rdp->last_advance_all) 1306 return false; 1307 rdp->last_advance_all = jiffies; 1308 1309 rnp = rdp->mynode; 1310 1311 /* 1312 * Don't bother checking unless a grace period has 1313 * completed since we last checked and there are 1314 * callbacks not yet ready to invoke. 1315 */ 1316 if ((rcu_seq_completed_gp(rdp->gp_seq, 1317 rcu_seq_current(&rnp->gp_seq)) || 1318 unlikely(READ_ONCE(rdp->gpwrap))) && 1319 rcu_segcblist_pend_cbs(&rdp->cblist)) 1320 note_gp_changes(rdp); 1321 1322 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 1323 cbs_ready = true; 1324 return cbs_ready; 1325 } 1326 1327 /* 1328 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready 1329 * to invoke. If the CPU has callbacks, try to advance them. Tell the 1330 * caller to set the timeout based on whether or not there are non-lazy 1331 * callbacks. 1332 * 1333 * The caller must have disabled interrupts. 1334 */ 1335 int rcu_needs_cpu(u64 basemono, u64 *nextevt) 1336 { 1337 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1338 unsigned long dj; 1339 1340 lockdep_assert_irqs_disabled(); 1341 1342 /* If no non-offloaded callbacks, RCU doesn't need the CPU. */ 1343 if (rcu_segcblist_empty(&rdp->cblist) || 1344 rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) { 1345 *nextevt = KTIME_MAX; 1346 return 0; 1347 } 1348 1349 /* Attempt to advance callbacks. */ 1350 if (rcu_try_advance_all_cbs()) { 1351 /* Some ready to invoke, so initiate later invocation. */ 1352 invoke_rcu_core(); 1353 return 1; 1354 } 1355 rdp->last_accelerate = jiffies; 1356 1357 /* Request timer delay depending on laziness, and round. 
*/ 1358 rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist); 1359 if (rdp->all_lazy) { 1360 dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies; 1361 } else { 1362 dj = round_up(rcu_idle_gp_delay + jiffies, 1363 rcu_idle_gp_delay) - jiffies; 1364 } 1365 *nextevt = basemono + dj * TICK_NSEC; 1366 return 0; 1367 } 1368 1369 /* 1370 * Prepare a CPU for idle from an RCU perspective. The first major task 1371 * is to sense whether nohz mode has been enabled or disabled via sysfs. 1372 * The second major task is to check to see if a non-lazy callback has 1373 * arrived at a CPU that previously had only lazy callbacks. The third 1374 * major task is to accelerate (that is, assign grace-period numbers to) 1375 * any recently arrived callbacks. 1376 * 1377 * The caller must have disabled interrupts. 1378 */ 1379 static void rcu_prepare_for_idle(void) 1380 { 1381 bool needwake; 1382 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1383 struct rcu_node *rnp; 1384 int tne; 1385 1386 lockdep_assert_irqs_disabled(); 1387 if (rcu_segcblist_is_offloaded(&rdp->cblist)) 1388 return; 1389 1390 /* Handle nohz enablement switches conservatively. */ 1391 tne = READ_ONCE(tick_nohz_active); 1392 if (tne != rdp->tick_nohz_enabled_snap) { 1393 if (!rcu_segcblist_empty(&rdp->cblist)) 1394 invoke_rcu_core(); /* force nohz to see update. */ 1395 rdp->tick_nohz_enabled_snap = tne; 1396 return; 1397 } 1398 if (!tne) 1399 return; 1400 1401 /* 1402 * If a non-lazy callback arrived at a CPU having only lazy 1403 * callbacks, invoke RCU core for the side-effect of recalculating 1404 * idle duration on re-entry to idle. 1405 */ 1406 if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) { 1407 rdp->all_lazy = false; 1408 invoke_rcu_core(); 1409 return; 1410 } 1411 1412 /* 1413 * If we have not yet accelerated this jiffy, accelerate all 1414 * callbacks on this CPU. 1415 */ 1416 if (rdp->last_accelerate == jiffies) 1417 return; 1418 rdp->last_accelerate = jiffies; 1419 if (rcu_segcblist_pend_cbs(&rdp->cblist)) { 1420 rnp = rdp->mynode; 1421 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1422 needwake = rcu_accelerate_cbs(rnp, rdp); 1423 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1424 if (needwake) 1425 rcu_gp_kthread_wake(); 1426 } 1427 } 1428 1429 /* 1430 * Clean up for exit from idle. Attempt to advance callbacks based on 1431 * any grace periods that elapsed while the CPU was idle, and if any 1432 * callbacks are now ready to invoke, initiate invocation. 1433 */ 1434 static void rcu_cleanup_after_idle(void) 1435 { 1436 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1437 1438 lockdep_assert_irqs_disabled(); 1439 if (rcu_segcblist_is_offloaded(&rdp->cblist)) 1440 return; 1441 if (rcu_try_advance_all_cbs()) 1442 invoke_rcu_core(); 1443 } 1444 1445 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1446 1447 #ifdef CONFIG_RCU_NOCB_CPU 1448 1449 /* 1450 * Offload callback processing from the boot-time-specified set of CPUs 1451 * specified by rcu_nocb_mask. For the CPUs in the set, there are kthreads 1452 * created that pull the callbacks from the corresponding CPU, wait for 1453 * a grace period to elapse, and invoke the callbacks. These kthreads 1454 * are organized into GP kthreads, which manage incoming callbacks, wait for 1455 * grace periods, and awaken CB kthreads, and the CB kthreads, which only 1456 * invoke callbacks. Each GP kthread invokes its own CBs. 
The no-CBs CPUs 1457 * do a wake_up() on their GP kthread when they insert a callback into any 1458 * empty list, unless the rcu_nocb_poll boot parameter has been specified, 1459 * in which case each kthread actively polls its CPU. (Which isn't so great 1460 * for energy efficiency, but which does reduce RCU's overhead on that CPU.) 1461 * 1462 * This is intended to be used in conjunction with Frederic Weisbecker's 1463 * adaptive-idle work, which would seriously reduce OS jitter on CPUs 1464 * running CPU-bound user-mode computations. 1465 * 1466 * Offloading of callbacks can also be used as an energy-efficiency 1467 * measure because CPUs with no RCU callbacks queued are more aggressive 1468 * about entering dyntick-idle mode. 1469 */ 1470 1471 1472 /* 1473 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. 1474 * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a 1475 * comma-separated list of CPUs and/or CPU ranges. If an invalid list is 1476 * given, a warning is emitted and all CPUs are offloaded. 1477 */ 1478 static int __init rcu_nocb_setup(char *str) 1479 { 1480 alloc_bootmem_cpumask_var(&rcu_nocb_mask); 1481 if (!strcasecmp(str, "all")) 1482 cpumask_setall(rcu_nocb_mask); 1483 else 1484 if (cpulist_parse(str, rcu_nocb_mask)) { 1485 pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); 1486 cpumask_setall(rcu_nocb_mask); 1487 } 1488 return 1; 1489 } 1490 __setup("rcu_nocbs=", rcu_nocb_setup); 1491 1492 static int __init parse_rcu_nocb_poll(char *arg) 1493 { 1494 rcu_nocb_poll = true; 1495 return 0; 1496 } 1497 early_param("rcu_nocb_poll", parse_rcu_nocb_poll); 1498 1499 /* 1500 * Don't bother bypassing ->cblist if the call_rcu() rate is low. 1501 * After all, the main point of bypassing is to avoid lock contention 1502 * on ->nocb_lock, which only can happen at high call_rcu() rates. 1503 */ 1504 int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ; 1505 module_param(nocb_nobypass_lim_per_jiffy, int, 0); 1506 1507 /* 1508 * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the 1509 * lock isn't immediately available, increment ->nocb_lock_contended to 1510 * flag the contention. 1511 */ 1512 static void rcu_nocb_bypass_lock(struct rcu_data *rdp) 1513 { 1514 lockdep_assert_irqs_disabled(); 1515 if (raw_spin_trylock(&rdp->nocb_bypass_lock)) 1516 return; 1517 atomic_inc(&rdp->nocb_lock_contended); 1518 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); 1519 smp_mb__after_atomic(); /* atomic_inc() before lock. */ 1520 raw_spin_lock(&rdp->nocb_bypass_lock); 1521 smp_mb__before_atomic(); /* atomic_dec() after lock. */ 1522 atomic_dec(&rdp->nocb_lock_contended); 1523 } 1524 1525 /* 1526 * Spinwait until the specified rcu_data structure's ->nocb_lock is 1527 * not contended. Please note that this is extremely special-purpose, 1528 * relying on the fact that at most two kthreads and one CPU contend for 1529 * this lock, and also that the two kthreads are guaranteed to have frequent 1530 * grace-period-duration time intervals between successive acquisitions 1531 * of the lock. This allows us to use an extremely simple throttling 1532 * mechanism, and further to apply it only to the CPU doing floods of 1533 * call_rcu() invocations. Don't try this at home! 
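 * (The flag being spun on is ->nocb_lock_contended, which is set by
 * rcu_nocb_bypass_lock() above when its trylock attempt fails.)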
1534 */ 1535 static void rcu_nocb_wait_contended(struct rcu_data *rdp) 1536 { 1537 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); 1538 while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended))) 1539 cpu_relax(); 1540 } 1541 1542 /* 1543 * Conditionally acquire the specified rcu_data structure's 1544 * ->nocb_bypass_lock. 1545 */ 1546 static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp) 1547 { 1548 lockdep_assert_irqs_disabled(); 1549 return raw_spin_trylock(&rdp->nocb_bypass_lock); 1550 } 1551 1552 /* 1553 * Release the specified rcu_data structure's ->nocb_bypass_lock. 1554 */ 1555 static void rcu_nocb_bypass_unlock(struct rcu_data *rdp) 1556 { 1557 lockdep_assert_irqs_disabled(); 1558 raw_spin_unlock(&rdp->nocb_bypass_lock); 1559 } 1560 1561 /* 1562 * Acquire the specified rcu_data structure's ->nocb_lock, but only 1563 * if it corresponds to a no-CBs CPU. 1564 */ 1565 static void rcu_nocb_lock(struct rcu_data *rdp) 1566 { 1567 lockdep_assert_irqs_disabled(); 1568 if (!rcu_segcblist_is_offloaded(&rdp->cblist)) 1569 return; 1570 raw_spin_lock(&rdp->nocb_lock); 1571 } 1572 1573 /* 1574 * Release the specified rcu_data structure's ->nocb_lock, but only 1575 * if it corresponds to a no-CBs CPU. 1576 */ 1577 static void rcu_nocb_unlock(struct rcu_data *rdp) 1578 { 1579 if (rcu_segcblist_is_offloaded(&rdp->cblist)) { 1580 lockdep_assert_irqs_disabled(); 1581 raw_spin_unlock(&rdp->nocb_lock); 1582 } 1583 } 1584 1585 /* 1586 * Release the specified rcu_data structure's ->nocb_lock and restore 1587 * interrupts, but only if it corresponds to a no-CBs CPU. 1588 */ 1589 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, 1590 unsigned long flags) 1591 { 1592 if (rcu_segcblist_is_offloaded(&rdp->cblist)) { 1593 lockdep_assert_irqs_disabled(); 1594 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 1595 } else { 1596 local_irq_restore(flags); 1597 } 1598 } 1599 1600 /* Lockdep check that ->cblist may be safely accessed. */ 1601 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) 1602 { 1603 lockdep_assert_irqs_disabled(); 1604 if (rcu_segcblist_is_offloaded(&rdp->cblist) && 1605 cpu_online(rdp->cpu)) 1606 lockdep_assert_held(&rdp->nocb_lock); 1607 } 1608 1609 /* 1610 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended 1611 * grace period. 1612 */ 1613 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) 1614 { 1615 swake_up_all(sq); 1616 } 1617 1618 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) 1619 { 1620 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; 1621 } 1622 1623 static void rcu_init_one_nocb(struct rcu_node *rnp) 1624 { 1625 init_swait_queue_head(&rnp->nocb_gp_wq[0]); 1626 init_swait_queue_head(&rnp->nocb_gp_wq[1]); 1627 } 1628 1629 /* Is the specified CPU a no-CBs CPU? */ 1630 bool rcu_is_nocb_cpu(int cpu) 1631 { 1632 if (cpumask_available(rcu_nocb_mask)) 1633 return cpumask_test_cpu(cpu, rcu_nocb_mask); 1634 return false; 1635 } 1636 1637 /* 1638 * Kick the GP kthread for this NOCB group. Caller holds ->nocb_lock 1639 * and this function releases it. 
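 * Callers that cannot safely do an immediate wakeup instead use
 * wake_nocb_gp_defer() below, which arms ->nocb_timer.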
1640 */ 1641 static void wake_nocb_gp(struct rcu_data *rdp, bool force, 1642 unsigned long flags) 1643 __releases(rdp->nocb_lock) 1644 { 1645 bool needwake = false; 1646 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; 1647 1648 lockdep_assert_held(&rdp->nocb_lock); 1649 if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { 1650 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, 1651 TPS("AlreadyAwake")); 1652 rcu_nocb_unlock_irqrestore(rdp, flags); 1653 return; 1654 } 1655 del_timer(&rdp->nocb_timer); 1656 rcu_nocb_unlock_irqrestore(rdp, flags); 1657 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); 1658 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { 1659 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); 1660 needwake = true; 1661 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); 1662 } 1663 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); 1664 if (needwake) 1665 wake_up_process(rdp_gp->nocb_gp_kthread); 1666 } 1667 1668 /* 1669 * Arrange to wake the GP kthread for this NOCB group at some future 1670 * time when it is safe to do so. 1671 */ 1672 static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, 1673 const char *reason) 1674 { 1675 if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) 1676 mod_timer(&rdp->nocb_timer, jiffies + 1); 1677 if (rdp->nocb_defer_wakeup < waketype) 1678 WRITE_ONCE(rdp->nocb_defer_wakeup, waketype); 1679 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); 1680 } 1681 1682 /* 1683 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL. 1684 * However, if there is a callback to be enqueued and if ->nocb_bypass 1685 * proves to be initially empty, just return false because the no-CB GP 1686 * kthread may need to be awakened in this case. 1687 * 1688 * Note that this function always returns true if rhp is NULL. 1689 */ 1690 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 1691 unsigned long j) 1692 { 1693 struct rcu_cblist rcl; 1694 1695 WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist)); 1696 rcu_lockdep_assert_cblist_protected(rdp); 1697 lockdep_assert_held(&rdp->nocb_bypass_lock); 1698 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { 1699 raw_spin_unlock(&rdp->nocb_bypass_lock); 1700 return false; 1701 } 1702 /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */ 1703 if (rhp) 1704 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ 1705 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); 1706 rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); 1707 WRITE_ONCE(rdp->nocb_bypass_first, j); 1708 rcu_nocb_bypass_unlock(rdp); 1709 return true; 1710 } 1711 1712 /* 1713 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL. 1714 * However, if there is a callback to be enqueued and if ->nocb_bypass 1715 * proves to be initially empty, just return false because the no-CB GP 1716 * kthread may need to be awakened in this case. 1717 * 1718 * Note that this function always returns true if rhp is NULL. 1719 */ 1720 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 1721 unsigned long j) 1722 { 1723 if (!rcu_segcblist_is_offloaded(&rdp->cblist)) 1724 return true; 1725 rcu_lockdep_assert_cblist_protected(rdp); 1726 rcu_nocb_bypass_lock(rdp); 1727 return rcu_nocb_do_flush_bypass(rdp, rhp, j); 1728 } 1729 1730 /* 1731 * If the ->nocb_bypass_lock is immediately available, flush the 1732 * ->nocb_bypass queue into ->cblist. 
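 * Unlike rcu_nocb_flush_bypass() above, this best-effort variant simply
 * gives up if the trylock fails rather than waiting for the lock.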
1733 */ 1734 static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j) 1735 { 1736 rcu_lockdep_assert_cblist_protected(rdp); 1737 if (!rcu_segcblist_is_offloaded(&rdp->cblist) || 1738 !rcu_nocb_bypass_trylock(rdp)) 1739 return; 1740 WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j)); 1741 } 1742 1743 /* 1744 * See whether it is appropriate to use the ->nocb_bypass list in order 1745 * to control contention on ->nocb_lock. A limited number of direct 1746 * enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass 1747 * is non-empty, further callbacks must be placed into ->nocb_bypass, 1748 * otherwise rcu_barrier() breaks. Use rcu_nocb_flush_bypass() to switch 1749 * back to direct use of ->cblist. However, ->nocb_bypass should not be 1750 * used if ->cblist is empty, because otherwise callbacks can be stranded 1751 * on ->nocb_bypass because we cannot count on the current CPU ever again 1752 * invoking call_rcu(). The general rule is that if ->nocb_bypass is 1753 * non-empty, the corresponding no-CBs grace-period kthread must not be 1754 * in an indefinite sleep state. 1755 * 1756 * Finally, it is not permitted to use the bypass during early boot, 1757 * as doing so would confuse the auto-initialization code. Besides 1758 * which, there is no point in worrying about lock contention while 1759 * there is only one CPU in operation. 1760 */ 1761 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 1762 bool *was_alldone, unsigned long flags) 1763 { 1764 unsigned long c; 1765 unsigned long cur_gp_seq; 1766 unsigned long j = jiffies; 1767 long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); 1768 1769 if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { 1770 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); 1771 return false; /* Not offloaded, no bypassing. */ 1772 } 1773 lockdep_assert_irqs_disabled(); 1774 1775 // Don't use ->nocb_bypass during early boot. 1776 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) { 1777 rcu_nocb_lock(rdp); 1778 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); 1779 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); 1780 return false; 1781 } 1782 1783 // If we have advanced to a new jiffy, reset counts to allow 1784 // moving back from ->nocb_bypass to ->cblist. 1785 if (j == rdp->nocb_nobypass_last) { 1786 c = rdp->nocb_nobypass_count + 1; 1787 } else { 1788 WRITE_ONCE(rdp->nocb_nobypass_last, j); 1789 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; 1790 if (ULONG_CMP_LT(rdp->nocb_nobypass_count, 1791 nocb_nobypass_lim_per_jiffy)) 1792 c = 0; 1793 else if (c > nocb_nobypass_lim_per_jiffy) 1794 c = nocb_nobypass_lim_per_jiffy; 1795 } 1796 WRITE_ONCE(rdp->nocb_nobypass_count, c); 1797 1798 // If there hasn't yet been all that many ->cblist enqueues 1799 // this jiffy, tell the caller to enqueue onto ->cblist. But flush 1800 // ->nocb_bypass first. 1801 if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) { 1802 rcu_nocb_lock(rdp); 1803 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); 1804 if (*was_alldone) 1805 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, 1806 TPS("FirstQ")); 1807 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j)); 1808 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); 1809 return false; // Caller must enqueue the callback. 1810 } 1811 1812 // If ->nocb_bypass has been used too long or is too full, 1813 // flush ->nocb_bypass to ->cblist. 
1814 if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
1815 ncbs >= qhimark) {
1816 rcu_nocb_lock(rdp);
1817 if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
1818 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
1819 if (*was_alldone)
1820 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1821 TPS("FirstQ"));
1822 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1823 return false; // Caller must enqueue the callback.
1824 }
1825 if (j != rdp->nocb_gp_adv_time &&
1826 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
1827 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
1828 rcu_advance_cbs_nowake(rdp->mynode, rdp);
1829 rdp->nocb_gp_adv_time = j;
1830 }
1831 rcu_nocb_unlock_irqrestore(rdp, flags);
1832 return true; // Callback already enqueued.
1833 }
1834
1835 // We need to use the bypass.
1836 rcu_nocb_wait_contended(rdp);
1837 rcu_nocb_bypass_lock(rdp);
1838 ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
1839 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
1840 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
1841 if (!ncbs) {
1842 WRITE_ONCE(rdp->nocb_bypass_first, j);
1843 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
1844 }
1845 rcu_nocb_bypass_unlock(rdp);
1846 smp_mb(); /* Order enqueue before wake. */
1847 if (ncbs) {
1848 local_irq_restore(flags);
1849 } else {
1850 // No-CBs GP kthread might be indefinitely asleep, if so, wake.
1851 rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
1852 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
1853 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1854 TPS("FirstBQwake"));
1855 __call_rcu_nocb_wake(rdp, true, flags);
1856 } else {
1857 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1858 TPS("FirstBQnoWake"));
1859 rcu_nocb_unlock_irqrestore(rdp, flags);
1860 }
1861 }
1862 return true; // Callback already enqueued.
1863 }
1864
1865 /*
1866 * Awaken the no-CBs grace-period kthread if needed, either due to it
1867 * legitimately being asleep or due to overload conditions.
1868 *
1869 * If warranted, also wake up the kthread servicing this CPU's queues.
1870 */
1871 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
1872 unsigned long flags)
1873 __releases(rdp->nocb_lock)
1874 {
1875 unsigned long cur_gp_seq;
1876 unsigned long j;
1877 long len;
1878 struct task_struct *t;
1879
1880 // If we are being polled or there is no kthread, just leave.
1881 t = READ_ONCE(rdp->nocb_gp_kthread);
1882 if (rcu_nocb_poll || !t) {
1883 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1884 TPS("WakeNotPoll"));
1885 rcu_nocb_unlock_irqrestore(rdp, flags);
1886 return;
1887 }
1888 // Need to actually do a wakeup.
1889 len = rcu_segcblist_n_cbs(&rdp->cblist);
1890 if (was_alldone) {
1891 rdp->qlen_last_fqs_check = len;
1892 if (!irqs_disabled_flags(flags)) {
1893 /* ... if queue was empty ... */
1894 wake_nocb_gp(rdp, false, flags);
1895 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1896 TPS("WakeEmpty"));
1897 } else {
1898 wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
1899 TPS("WakeEmptyIsDeferred"));
1900 rcu_nocb_unlock_irqrestore(rdp, flags);
1901 }
1902 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
1903 /* ... or if many callbacks queued. */
1904 rdp->qlen_last_fqs_check = len;
1905 j = jiffies;
1906 if (j != rdp->nocb_gp_adv_time &&
1907 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
1908 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
1909 rcu_advance_cbs_nowake(rdp->mynode, rdp);
1910 rdp->nocb_gp_adv_time = j;
1911 }
1912 smp_mb(); /* Enqueue before timer_pending().
*/ 1913 if ((rdp->nocb_cb_sleep || 1914 !rcu_segcblist_ready_cbs(&rdp->cblist)) && 1915 !timer_pending(&rdp->nocb_bypass_timer)) 1916 wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, 1917 TPS("WakeOvfIsDeferred")); 1918 rcu_nocb_unlock_irqrestore(rdp, flags); 1919 } else { 1920 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); 1921 rcu_nocb_unlock_irqrestore(rdp, flags); 1922 } 1923 return; 1924 } 1925 1926 /* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */ 1927 static void do_nocb_bypass_wakeup_timer(struct timer_list *t) 1928 { 1929 unsigned long flags; 1930 struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer); 1931 1932 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); 1933 rcu_nocb_lock_irqsave(rdp, flags); 1934 smp_mb__after_spinlock(); /* Timer expire before wakeup. */ 1935 __call_rcu_nocb_wake(rdp, true, flags); 1936 } 1937 1938 /* 1939 * No-CBs GP kthreads come here to wait for additional callbacks to show up 1940 * or for grace periods to end. 1941 */ 1942 static void nocb_gp_wait(struct rcu_data *my_rdp) 1943 { 1944 bool bypass = false; 1945 long bypass_ncbs; 1946 int __maybe_unused cpu = my_rdp->cpu; 1947 unsigned long cur_gp_seq; 1948 unsigned long flags; 1949 bool gotcbs = false; 1950 unsigned long j = jiffies; 1951 bool needwait_gp = false; // This prevents actual uninitialized use. 1952 bool needwake; 1953 bool needwake_gp; 1954 struct rcu_data *rdp; 1955 struct rcu_node *rnp; 1956 unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning. 1957 1958 /* 1959 * Each pass through the following loop checks for CBs and for the 1960 * nearest grace period (if any) to wait for next. The CB kthreads 1961 * and the global grace-period kthread are awakened if needed. 1962 */ 1963 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { 1964 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); 1965 rcu_nocb_lock_irqsave(rdp, flags); 1966 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); 1967 if (bypass_ncbs && 1968 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || 1969 bypass_ncbs > 2 * qhimark)) { 1970 // Bypass full or old, so flush it. 1971 (void)rcu_nocb_try_flush_bypass(rdp, j); 1972 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); 1973 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { 1974 rcu_nocb_unlock_irqrestore(rdp, flags); 1975 continue; /* No callbacks here, try next. */ 1976 } 1977 if (bypass_ncbs) { 1978 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, 1979 TPS("Bypass")); 1980 bypass = true; 1981 } 1982 rnp = rdp->mynode; 1983 if (bypass) { // Avoid race with first bypass CB. 1984 WRITE_ONCE(my_rdp->nocb_defer_wakeup, 1985 RCU_NOCB_WAKE_NOT); 1986 del_timer(&my_rdp->nocb_timer); 1987 } 1988 // Advance callbacks if helpful and low contention. 1989 needwake_gp = false; 1990 if (!rcu_segcblist_restempty(&rdp->cblist, 1991 RCU_NEXT_READY_TAIL) || 1992 (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && 1993 rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { 1994 raw_spin_lock_rcu_node(rnp); /* irqs disabled. */ 1995 needwake_gp = rcu_advance_cbs(rnp, rdp); 1996 raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */ 1997 } 1998 // Need to wait on some grace period? 
1999 WARN_ON_ONCE(!rcu_segcblist_restempty(&rdp->cblist, 2000 RCU_NEXT_READY_TAIL)); 2001 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) { 2002 if (!needwait_gp || 2003 ULONG_CMP_LT(cur_gp_seq, wait_gp_seq)) 2004 wait_gp_seq = cur_gp_seq; 2005 needwait_gp = true; 2006 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, 2007 TPS("NeedWaitGP")); 2008 } 2009 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { 2010 needwake = rdp->nocb_cb_sleep; 2011 WRITE_ONCE(rdp->nocb_cb_sleep, false); 2012 smp_mb(); /* CB invocation -after- GP end. */ 2013 } else { 2014 needwake = false; 2015 } 2016 rcu_nocb_unlock_irqrestore(rdp, flags); 2017 if (needwake) { 2018 swake_up_one(&rdp->nocb_cb_wq); 2019 gotcbs = true; 2020 } 2021 if (needwake_gp) 2022 rcu_gp_kthread_wake(); 2023 } 2024 2025 my_rdp->nocb_gp_bypass = bypass; 2026 my_rdp->nocb_gp_gp = needwait_gp; 2027 my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; 2028 if (bypass && !rcu_nocb_poll) { 2029 // At least one child with non-empty ->nocb_bypass, so set 2030 // timer in order to avoid stranding its callbacks. 2031 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); 2032 mod_timer(&my_rdp->nocb_bypass_timer, j + 2); 2033 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); 2034 } 2035 if (rcu_nocb_poll) { 2036 /* Polling, so trace if first poll in the series. */ 2037 if (gotcbs) 2038 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll")); 2039 schedule_timeout_interruptible(1); 2040 } else if (!needwait_gp) { 2041 /* Wait for callbacks to appear. */ 2042 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep")); 2043 swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq, 2044 !READ_ONCE(my_rdp->nocb_gp_sleep)); 2045 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep")); 2046 } else { 2047 rnp = my_rdp->mynode; 2048 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait")); 2049 swait_event_interruptible_exclusive( 2050 rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1], 2051 rcu_seq_done(&rnp->gp_seq, wait_gp_seq) || 2052 !READ_ONCE(my_rdp->nocb_gp_sleep)); 2053 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait")); 2054 } 2055 if (!rcu_nocb_poll) { 2056 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); 2057 if (bypass) 2058 del_timer(&my_rdp->nocb_bypass_timer); 2059 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); 2060 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); 2061 } 2062 my_rdp->nocb_gp_seq = -1; 2063 WARN_ON(signal_pending(current)); 2064 } 2065 2066 /* 2067 * No-CBs grace-period-wait kthread. There is one of these per group 2068 * of CPUs, but only once at least one CPU in that group has come online 2069 * at least once since boot. This kthread checks for newly posted 2070 * callbacks from any of the CPUs it is responsible for, waits for a 2071 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances 2072 * that then have callback-invocation work to do. 2073 */ 2074 static int rcu_nocb_gp_kthread(void *arg) 2075 { 2076 struct rcu_data *rdp = arg; 2077 2078 for (;;) { 2079 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1); 2080 nocb_gp_wait(rdp); 2081 cond_resched_tasks_rcu_qs(); 2082 } 2083 return 0; 2084 } 2085 2086 /* 2087 * Invoke any ready callbacks from the corresponding no-CBs CPU, 2088 * then, if there are no more, wait for more to appear. 
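 * Callback invocation itself is handled by rcu_do_batch() with bottom
 * halves disabled.  If nothing further is ready, this kthread sets
 * ->nocb_cb_sleep and sleeps on ->nocb_cb_wq until nocb_gp_wait()
 * clears that flag and wakes it again.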
2089 */ 2090 static void nocb_cb_wait(struct rcu_data *rdp) 2091 { 2092 unsigned long cur_gp_seq; 2093 unsigned long flags; 2094 bool needwake_gp = false; 2095 struct rcu_node *rnp = rdp->mynode; 2096 2097 local_irq_save(flags); 2098 rcu_momentary_dyntick_idle(); 2099 local_irq_restore(flags); 2100 local_bh_disable(); 2101 rcu_do_batch(rdp); 2102 local_bh_enable(); 2103 lockdep_assert_irqs_enabled(); 2104 rcu_nocb_lock_irqsave(rdp, flags); 2105 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && 2106 rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && 2107 raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */ 2108 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); 2109 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2110 } 2111 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { 2112 rcu_nocb_unlock_irqrestore(rdp, flags); 2113 if (needwake_gp) 2114 rcu_gp_kthread_wake(); 2115 return; 2116 } 2117 2118 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); 2119 WRITE_ONCE(rdp->nocb_cb_sleep, true); 2120 rcu_nocb_unlock_irqrestore(rdp, flags); 2121 if (needwake_gp) 2122 rcu_gp_kthread_wake(); 2123 swait_event_interruptible_exclusive(rdp->nocb_cb_wq, 2124 !READ_ONCE(rdp->nocb_cb_sleep)); 2125 if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */ 2126 /* ^^^ Ensure CB invocation follows _sleep test. */ 2127 return; 2128 } 2129 WARN_ON(signal_pending(current)); 2130 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); 2131 } 2132 2133 /* 2134 * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke 2135 * nocb_cb_wait() to do the dirty work. 2136 */ 2137 static int rcu_nocb_cb_kthread(void *arg) 2138 { 2139 struct rcu_data *rdp = arg; 2140 2141 // Each pass through this loop does one callback batch, and, 2142 // if there are no more ready callbacks, waits for them. 2143 for (;;) { 2144 nocb_cb_wait(rdp); 2145 cond_resched_tasks_rcu_qs(); 2146 } 2147 return 0; 2148 } 2149 2150 /* Is a deferred wakeup of rcu_nocb_kthread() required? */ 2151 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2152 { 2153 return READ_ONCE(rdp->nocb_defer_wakeup); 2154 } 2155 2156 /* Do a deferred wakeup of rcu_nocb_kthread(). */ 2157 static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp) 2158 { 2159 unsigned long flags; 2160 int ndw; 2161 2162 rcu_nocb_lock_irqsave(rdp, flags); 2163 if (!rcu_nocb_need_deferred_wakeup(rdp)) { 2164 rcu_nocb_unlock_irqrestore(rdp, flags); 2165 return; 2166 } 2167 ndw = READ_ONCE(rdp->nocb_defer_wakeup); 2168 WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 2169 wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); 2170 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); 2171 } 2172 2173 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */ 2174 static void do_nocb_deferred_wakeup_timer(struct timer_list *t) 2175 { 2176 struct rcu_data *rdp = from_timer(rdp, t, nocb_timer); 2177 2178 do_nocb_deferred_wakeup_common(rdp); 2179 } 2180 2181 /* 2182 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath. 2183 * This means we do an inexact common-case check. Note that if 2184 * we miss, ->nocb_timer will eventually clean things up. 
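 * A deferred wakeup is requested by wake_nocb_gp_defer(), which arms
 * ->nocb_timer for one jiffy in the future when it records the first
 * deferred-wakeup request, so any wakeup that this lockless fastpath
 * check misses is eventually handled by do_nocb_deferred_wakeup_timer()
 * above.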
2185 */ 2186 static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2187 { 2188 if (rcu_nocb_need_deferred_wakeup(rdp)) 2189 do_nocb_deferred_wakeup_common(rdp); 2190 } 2191 2192 void __init rcu_init_nohz(void) 2193 { 2194 int cpu; 2195 bool need_rcu_nocb_mask = false; 2196 struct rcu_data *rdp; 2197 2198 #if defined(CONFIG_NO_HZ_FULL) 2199 if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask)) 2200 need_rcu_nocb_mask = true; 2201 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2202 2203 if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) { 2204 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { 2205 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); 2206 return; 2207 } 2208 } 2209 if (!cpumask_available(rcu_nocb_mask)) 2210 return; 2211 2212 #if defined(CONFIG_NO_HZ_FULL) 2213 if (tick_nohz_full_running) 2214 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask); 2215 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2216 2217 if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { 2218 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n"); 2219 cpumask_and(rcu_nocb_mask, cpu_possible_mask, 2220 rcu_nocb_mask); 2221 } 2222 if (cpumask_empty(rcu_nocb_mask)) 2223 pr_info("\tOffload RCU callbacks from CPUs: (none).\n"); 2224 else 2225 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n", 2226 cpumask_pr_args(rcu_nocb_mask)); 2227 if (rcu_nocb_poll) 2228 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); 2229 2230 for_each_cpu(cpu, rcu_nocb_mask) { 2231 rdp = per_cpu_ptr(&rcu_data, cpu); 2232 if (rcu_segcblist_empty(&rdp->cblist)) 2233 rcu_segcblist_init(&rdp->cblist); 2234 rcu_segcblist_offload(&rdp->cblist); 2235 } 2236 rcu_organize_nocb_kthreads(); 2237 } 2238 2239 /* Initialize per-rcu_data variables for no-CBs CPUs. */ 2240 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2241 { 2242 init_swait_queue_head(&rdp->nocb_cb_wq); 2243 init_swait_queue_head(&rdp->nocb_gp_wq); 2244 raw_spin_lock_init(&rdp->nocb_lock); 2245 raw_spin_lock_init(&rdp->nocb_bypass_lock); 2246 raw_spin_lock_init(&rdp->nocb_gp_lock); 2247 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); 2248 timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0); 2249 rcu_cblist_init(&rdp->nocb_bypass); 2250 } 2251 2252 /* 2253 * If the specified CPU is a no-CBs CPU that does not already have its 2254 * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread 2255 * for this CPU's group has not yet been created, spawn it as well. 2256 */ 2257 static void rcu_spawn_one_nocb_kthread(int cpu) 2258 { 2259 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2260 struct rcu_data *rdp_gp; 2261 struct task_struct *t; 2262 2263 /* 2264 * If this isn't a no-CBs CPU or if it already has an rcuo kthread, 2265 * then nothing to do. 2266 */ 2267 if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread) 2268 return; 2269 2270 /* If we didn't spawn the GP kthread first, reorganize! */ 2271 rdp_gp = rdp->nocb_gp_rdp; 2272 if (!rdp_gp->nocb_gp_kthread) { 2273 t = kthread_run(rcu_nocb_gp_kthread, rdp_gp, 2274 "rcuog/%d", rdp_gp->cpu); 2275 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) 2276 return; 2277 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); 2278 } 2279 2280 /* Spawn the kthread for this CPU. 
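 * The resulting CB kthread is named rcuo<x>/<cpu>, where <x> is
 * rcu_state.abbr, and it is awakened by its group's rcuog GP kthread
 * (spawned above) whenever callbacks become ready to invoke.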
*/
2281 t = kthread_run(rcu_nocb_cb_kthread, rdp,
2282 "rcuo%c/%d", rcu_state.abbr, cpu);
2283 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
2284 return;
2285 WRITE_ONCE(rdp->nocb_cb_kthread, t);
2286 WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
2287 }
2288
2289 /*
2290 * If the specified CPU is a no-CBs CPU that does not already have its
2291 * rcuo kthread, spawn it.
2292 */
2293 static void rcu_spawn_cpu_nocb_kthread(int cpu)
2294 {
2295 if (rcu_scheduler_fully_active)
2296 rcu_spawn_one_nocb_kthread(cpu);
2297 }
2298
2299 /*
2300 * Once the scheduler is running, spawn rcuo kthreads for all online
2301 * no-CBs CPUs. This assumes that the early_initcall()s happen before
2302 * non-boot CPUs come online -- if this changes, we will need to add
2303 * some mutual exclusion.
2304 */
2305 static void __init rcu_spawn_nocb_kthreads(void)
2306 {
2307 int cpu;
2308
2309 for_each_online_cpu(cpu)
2310 rcu_spawn_cpu_nocb_kthread(cpu);
2311 }
2312
2313 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
2314 static int rcu_nocb_gp_stride = -1;
2315 module_param(rcu_nocb_gp_stride, int, 0444);
2316
2317 /*
2318 * Initialize GP-CB relationships for all no-CBs CPUs.
2319 */
2320 static void __init rcu_organize_nocb_kthreads(void)
2321 {
2322 int cpu;
2323 bool firsttime = true;
2324 int ls = rcu_nocb_gp_stride;
2325 int nl = 0; /* Next GP kthread. */
2326 struct rcu_data *rdp;
2327 struct rcu_data *rdp_gp = NULL; /* Suppress misguided gcc warn. */
2328 struct rcu_data *rdp_prev = NULL;
2329
2330 if (!cpumask_available(rcu_nocb_mask))
2331 return;
2332 if (ls == -1) {
2333 ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
2334 rcu_nocb_gp_stride = ls;
2335 }
2336
2337 /*
2338 * Each pass through this loop sets up one rcu_data structure.
2339 * Should the corresponding CPU come online in the future, then
2340 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
2341 */
2342 for_each_cpu(cpu, rcu_nocb_mask) {
2343 rdp = per_cpu_ptr(&rcu_data, cpu);
2344 if (rdp->cpu >= nl) {
2345 /* New GP kthread, set up for CBs & next GP. */
2346 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2347 rdp->nocb_gp_rdp = rdp;
2348 rdp_gp = rdp;
2349 if (!firsttime && dump_tree)
2350 pr_cont("\n");
2351 firsttime = false;
2352 pr_alert("%s: No-CB GP kthread CPU %d:", __func__, cpu);
2353 } else {
2354 /* Another CB kthread, link to previous GP kthread. */
2355 rdp->nocb_gp_rdp = rdp_gp;
2356 rdp_prev->nocb_next_cb_rdp = rdp;
2357 pr_alert(" %d", cpu);
2358 }
2359 rdp_prev = rdp;
2360 }
2361 }
2362
2363 /*
2364 * Bind the current task to the offloaded CPUs. If there are no offloaded
2365 * CPUs, leave the task unbound. Splat if the bind attempt fails.
2366 */
2367 void rcu_bind_current_to_nocb(void)
2368 {
2369 if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
2370 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
2371 }
2372 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
2373
2374 /*
2375 * Dump out nocb grace-period kthread state for the specified rcu_data
2376 * structure.
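 * Each state bit is printed as a single character, with an upper-case
 * letter indicating that the corresponding condition holds and a
 * lower-case letter or '.' indicating that it does not.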
2377 */
2378 static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
2379 {
2380 struct rcu_node *rnp = rdp->mynode;
2381
2382 pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n",
2383 rdp->cpu,
2384 "kK"[!!rdp->nocb_gp_kthread],
2385 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
2386 "dD"[!!rdp->nocb_defer_wakeup],
2387 "tT"[timer_pending(&rdp->nocb_timer)],
2388 "bB"[timer_pending(&rdp->nocb_bypass_timer)],
2389 "sS"[!!rdp->nocb_gp_sleep],
2390 ".W"[swait_active(&rdp->nocb_gp_wq)],
2391 ".W"[swait_active(&rnp->nocb_gp_wq[0])],
2392 ".W"[swait_active(&rnp->nocb_gp_wq[1])],
2393 ".B"[!!rdp->nocb_gp_bypass],
2394 ".G"[!!rdp->nocb_gp_gp],
2395 (long)rdp->nocb_gp_seq,
2396 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
2397 }
2398
2399 /* Dump out nocb kthread state for the specified rcu_data structure. */
2400 static void show_rcu_nocb_state(struct rcu_data *rdp)
2401 {
2402 struct rcu_segcblist *rsclp = &rdp->cblist;
2403 bool waslocked;
2404 bool wastimer;
2405 bool wassleep;
2406
2407 if (rdp->nocb_gp_rdp == rdp)
2408 show_rcu_nocb_gp_state(rdp);
2409
2410 pr_info(" CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n",
2411 rdp->cpu, rdp->nocb_gp_rdp->cpu,
2412 "kK"[!!rdp->nocb_cb_kthread],
2413 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
2414 "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
2415 "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
2416 "sS"[!!rdp->nocb_cb_sleep],
2417 ".W"[swait_active(&rdp->nocb_cb_wq)],
2418 jiffies - rdp->nocb_bypass_first,
2419 jiffies - rdp->nocb_nobypass_last,
2420 rdp->nocb_nobypass_count,
2421 ".D"[rcu_segcblist_ready_cbs(rsclp)],
2422 ".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
2423 ".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
2424 ".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
2425 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
2426 rcu_segcblist_n_cbs(&rdp->cblist));
2427
2428 /* It is OK for GP kthreads to have GP state. */
2429 if (rdp->nocb_gp_rdp == rdp)
2430 return;
2431
2432 waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
2433 wastimer = timer_pending(&rdp->nocb_timer);
2434 wassleep = swait_active(&rdp->nocb_gp_wq);
2435 if (!rdp->nocb_defer_wakeup && !rdp->nocb_gp_sleep &&
2436 !waslocked && !wastimer && !wassleep)
2437 return; /* Nothing untoward. */
2438
2439 pr_info(" !!! %c%c%c%c %c\n",
2440 "lL"[waslocked],
2441 "dD"[!!rdp->nocb_defer_wakeup],
2442 "tT"[wastimer],
2443 "sS"[!!rdp->nocb_gp_sleep],
2444 ".W"[wassleep]);
2445 }
2446
2447 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2448
2449 /* No ->nocb_lock to acquire. */
2450 static void rcu_nocb_lock(struct rcu_data *rdp)
2451 {
2452 }
2453
2454 /* No ->nocb_lock to release. */
2455 static void rcu_nocb_unlock(struct rcu_data *rdp)
2456 {
2457 }
2458
2459 /* No ->nocb_lock to release. */
2460 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
2461 unsigned long flags)
2462 {
2463 local_irq_restore(flags);
2464 }
2465
2466 /* Lockdep check that ->cblist may be safely accessed.
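 * Without CONFIG_RCU_NOCB_CPU there is no ->nocb_lock, so having
 * interrupts disabled is all that is required.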
*/ 2467 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) 2468 { 2469 lockdep_assert_irqs_disabled(); 2470 } 2471 2472 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) 2473 { 2474 } 2475 2476 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) 2477 { 2478 return NULL; 2479 } 2480 2481 static void rcu_init_one_nocb(struct rcu_node *rnp) 2482 { 2483 } 2484 2485 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 2486 unsigned long j) 2487 { 2488 return true; 2489 } 2490 2491 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 2492 bool *was_alldone, unsigned long flags) 2493 { 2494 return false; 2495 } 2496 2497 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, 2498 unsigned long flags) 2499 { 2500 WARN_ON_ONCE(1); /* Should be dead code! */ 2501 } 2502 2503 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2504 { 2505 } 2506 2507 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2508 { 2509 return false; 2510 } 2511 2512 static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2513 { 2514 } 2515 2516 static void rcu_spawn_cpu_nocb_kthread(int cpu) 2517 { 2518 } 2519 2520 static void __init rcu_spawn_nocb_kthreads(void) 2521 { 2522 } 2523 2524 static void show_rcu_nocb_state(struct rcu_data *rdp) 2525 { 2526 } 2527 2528 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 2529 2530 /* 2531 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the 2532 * grace-period kthread will do force_quiescent_state() processing? 2533 * The idea is to avoid waking up RCU core processing on such a 2534 * CPU unless the grace period has extended for too long. 2535 * 2536 * This code relies on the fact that all NO_HZ_FULL CPUs are also 2537 * CONFIG_RCU_NOCB_CPU CPUs. 2538 */ 2539 static bool rcu_nohz_full_cpu(void) 2540 { 2541 #ifdef CONFIG_NO_HZ_FULL 2542 if (tick_nohz_full_cpu(smp_processor_id()) && 2543 (!rcu_gp_in_progress() || 2544 ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ))) 2545 return true; 2546 #endif /* #ifdef CONFIG_NO_HZ_FULL */ 2547 return false; 2548 } 2549 2550 /* 2551 * Bind the RCU grace-period kthreads to the housekeeping CPU. 2552 */ 2553 static void rcu_bind_gp_kthread(void) 2554 { 2555 if (!tick_nohz_full_enabled()) 2556 return; 2557 housekeeping_affine(current, HK_FLAG_RCU); 2558 } 2559 2560 /* Record the current task on dyntick-idle entry. */ 2561 static void rcu_dynticks_task_enter(void) 2562 { 2563 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) 2564 WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); 2565 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ 2566 } 2567 2568 /* Record no current task on dyntick-idle exit. */ 2569 static void rcu_dynticks_task_exit(void) 2570 { 2571 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) 2572 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); 2573 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ 2574 } 2575